; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN:   -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN:   --check-prefixes=CHECK,ZVFHMIN
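
; This file checks lowering of llvm.minnum.* for scalable bf16, f16, f32 and
; f64 vectors. As the assertions below show, bf16 (always) and f16 (under
; ZVFHMIN) have no native vfmin, so the operands are widened to f32 with
; vfwcvt(bf16).f.f.v, combined with vfmin.vv, and narrowed back with
; vfncvt(bf16).f.f.w; the m8-sized cases additionally spill one widened
; register group to the stack.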

define <vscale x 1 x bfloat> @vfmin_nxv1bf16_vv(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv1bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v9, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x bfloat> @llvm.minnum.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b)
  ret <vscale x 1 x bfloat> %v
}

define <vscale x 1 x bfloat> @vfmin_nxv1bf16_vf(<vscale x 1 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv1bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v9, v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x bfloat> @llvm.minnum.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %splat)
  ret <vscale x 1 x bfloat> %v
}

declare <vscale x 2 x bfloat> @llvm.minnum.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>)

define <vscale x 2 x bfloat> @vfmin_nxv2bf16_vv(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv2bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v9, v9, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x bfloat> @llvm.minnum.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b)
  ret <vscale x 2 x bfloat> %v
}

define <vscale x 2 x bfloat> @vfmin_nxv2bf16_vf(<vscale x 2 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv2bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v9, v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x bfloat> @llvm.minnum.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %splat)
  ret <vscale x 2 x bfloat> %v
}

declare <vscale x 4 x bfloat> @llvm.minnum.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>)

define <vscale x 4 x bfloat> @vfmin_nxv4bf16_vv(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv4bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v10, v12, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x bfloat> @llvm.minnum.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b)
  ret <vscale x 4 x bfloat> %v
}

define <vscale x 4 x bfloat> @vfmin_nxv4bf16_vf(<vscale x 4 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv4bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v10, v10, v12
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x bfloat> @llvm.minnum.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %splat)
  ret <vscale x 4 x bfloat> %v
}

declare <vscale x 8 x bfloat> @llvm.minnum.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)

define <vscale x 8 x bfloat> @vfmin_nxv8bf16_vv(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv8bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v10
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v12, v16, v12
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x bfloat> @llvm.minnum.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %v
}

define <vscale x 8 x bfloat> @vfmin_nxv8bf16_vf(<vscale x 8 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv8bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.x v10, a0
; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v10
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v12, v12, v16
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x bfloat> @llvm.minnum.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %splat)
  ret <vscale x 8 x bfloat> %v
}

declare <vscale x 16 x bfloat> @llvm.minnum.nxv16bf16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>)

define <vscale x 16 x bfloat> @vfmin_nxv16bf16_vv(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv16bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v24, v16
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x bfloat> @llvm.minnum.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b)
  ret <vscale x 16 x bfloat> %v
}

define <vscale x 16 x bfloat> @vfmin_nxv16bf16_vf(<vscale x 16 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv16bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmv.v.x v12, a0
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v16, v24
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x bfloat> @llvm.minnum.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %splat)
  ret <vscale x 16 x bfloat> %v
}

declare <vscale x 32 x bfloat> @llvm.minnum.nxv32bf16(<vscale x 32 x bfloat>, <vscale x 32 x bfloat>)

define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vv(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) {
; CHECK-LABEL: vfmin_nxv32bf16_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v16
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vfwcvtbf16.f.f.v v0, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v20
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v0, v0, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v16, v24
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x bfloat> @llvm.minnum.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b)
  ret <vscale x 32 x bfloat> %v
}

define <vscale x 32 x bfloat> @vfmin_nxv32bf16_vf(<vscale x 32 x bfloat> %a, bfloat %b) {
; CHECK-LABEL: vfmin_nxv32bf16_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    fmv.x.h a0, fa0
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwcvtbf16.f.f.v v0, v8
; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v0, v8, v0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v0
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v16, v24, v16
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
  %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x bfloat> @llvm.minnum.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %splat)
  ret <vscale x 32 x bfloat> %v
}

declare <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>)

define <vscale x 1 x half> @vfmin_nxv1f16_vv(<vscale x 1 x half> %a, <vscale x 1 x half> %b) {
; ZVFH-LABEL: vfmin_nxv1f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv1f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfmin_nxv1f16_vf(<vscale x 1 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv1f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv1f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x half> @llvm.minnum.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %splat)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)

define <vscale x 2 x half> @vfmin_nxv2f16_vv(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; ZVFH-LABEL: vfmin_nxv2f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv2f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfmin_nxv2f16_vf(<vscale x 2 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv2f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv2f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v9, v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %splat)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)

define <vscale x 4 x half> @vfmin_nxv4f16_vv(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; ZVFH-LABEL: vfmin_nxv4f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv4f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfmin_nxv4f16_vf(<vscale x 4 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv4f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv4f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v10, v10, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %splat)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)

define <vscale x 8 x half> @vfmin_nxv8f16_vv(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; ZVFH-LABEL: vfmin_nxv8f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv8f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfmin_nxv8f16_vf(<vscale x 8 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv8f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv8f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v12, v12, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %splat)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)

define <vscale x 16 x half> @vfmin_nxv16f16_vv(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; ZVFH-LABEL: vfmin_nxv16f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv16f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v16, v24, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfmin_nxv16f16_vf(<vscale x 16 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv16f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv16f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v12, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %splat)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.minnum.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>)

define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; ZVFH-LABEL: vfmin_nxv32f16_vv:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfmin.vv v8, v8, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv32f16_vv:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    sub sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v0, v0, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.minnum.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfmin_nxv32f16_vf(<vscale x 32 x half> %a, half %b) {
; ZVFH-LABEL: vfmin_nxv32f16_vf:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfmin_nxv32f16_vf:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    sub sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    addi a1, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v8, a0
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v0, v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmin.vv v16, v24, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x half> @llvm.minnum.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %splat)
  ret <vscale x 32 x half> %v
}

declare <vscale x 1 x float> @llvm.minnum.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>)

define <vscale x 1 x float> @vfmin_nxv1f32_vv(<vscale x 1 x float> %a, <vscale x 1 x float> %b) {
; CHECK-LABEL: vfmin_nxv1f32_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.minnum.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfmin_nxv1f32_vf(<vscale x 1 x float> %a, float %b) {
; CHECK-LABEL: vfmin_nxv1f32_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x float> poison, float %b, i32 0
  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x float> @llvm.minnum.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %splat)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)

define <vscale x 2 x float> @vfmin_nxv2f32_vv(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: vfmin_nxv2f32_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfmin_nxv2f32_vf(<vscale x 2 x float> %a, float %b) {
; CHECK-LABEL: vfmin_nxv2f32_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x float> poison, float %b, i32 0
  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %splat)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)

define <vscale x 4 x float> @vfmin_nxv4f32_vv(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: vfmin_nxv4f32_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfmin_nxv4f32_vf(<vscale x 4 x float> %a, float %b) {
; CHECK-LABEL: vfmin_nxv4f32_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x float> poison, float %b, i32 0
  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %splat)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)

define <vscale x 8 x float> @vfmin_nxv8f32_vv(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: vfmin_nxv8f32_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfmin_nxv8f32_vf(<vscale x 8 x float> %a, float %b) {
; CHECK-LABEL: vfmin_nxv8f32_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %splat)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>)

define <vscale x 16 x float> @vfmin_nxv16f32_vv(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vfmin_nxv16f32_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfmin_nxv16f32_vf(<vscale x 16 x float> %a, float %b) {
; CHECK-LABEL: vfmin_nxv16f32_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x float> poison, float %b, i32 0
  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x float> @llvm.minnum.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %splat)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>)

define <vscale x 1 x double> @vfmin_nxv1f64_vv(<vscale x 1 x double> %a, <vscale x 1 x double> %b) {
; CHECK-LABEL: vfmin_nxv1f64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfmin_nxv1f64_vf(<vscale x 1 x double> %a, double %b) {
; CHECK-LABEL: vfmin_nxv1f64_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x double> poison, double %b, i32 0
  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x double> @llvm.minnum.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %splat)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

define <vscale x 2 x double> @vfmin_nxv2f64_vv(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: vfmin_nxv2f64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfmin_nxv2f64_vf(<vscale x 2 x double> %a, double %b) {
; CHECK-LABEL: vfmin_nxv2f64_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x double> poison, double %b, i32 0
  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %splat)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)

define <vscale x 4 x double> @vfmin_nxv4f64_vv(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: vfmin_nxv4f64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfmin_nxv4f64_vf(<vscale x 4 x double> %a, double %b) {
; CHECK-LABEL: vfmin_nxv4f64_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x double> poison, double %b, i32 0
  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %splat)
  ret <vscale x 4 x double> %v
}

declare <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>)

define <vscale x 8 x double> @vfmin_nxv8f64_vv(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: vfmin_nxv8f64_vv:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfmin_nxv8f64_vf(<vscale x 8 x double> %a, double %b) {
; CHECK-LABEL: vfmin_nxv8f64_vf:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfmin.vf v8, v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x double> @llvm.minnum.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %splat)
  ret <vscale x 8 x double> %v
}