; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
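
; Tests for the llvm.vp.copysign.* intrinsics. With +zvfh the operation
; lowers directly to a single (optionally masked) vfsgnj.vv. With +zvfhmin
; the f16 cases are widened to f32 with vfwcvt.f.f.v, the sign copy is done
; at e32, and the result is narrowed back with vfncvt.f.f.w.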

declare <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vfsgnj_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfsgnj_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}
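
; The same pattern repeats for the nxv2/nxv4/nxv8/nxv16 f16 cases below; the
; element count only changes the LMUL used on either side of the conversions.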

declare <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfsgnj_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfsgnj_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v9, v9, v10
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfsgnj_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfsgnj_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v10, v12, v10
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfsgnj_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfsgnj_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v12, v16, v12
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfsgnj_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfsgnj_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32)
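
; For nxv32f16 the ZVFHMIN lowering cannot widen the whole vector at once,
; since the widened nxv32f32 value would not fit in a single m8 register
; group. The EVL and mask are therefore split in two, one source register
; group is spilled around the first half, and each half is widened,
; sign-injected at e32, and narrowed back independently.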

define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    vmv1r.v v7, v0
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT:    addi a2, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB10_2:
; ZVFHMIN-NEXT:    addi a1, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vmv1r.v v0, v7
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, i32 zeroext %evl) {
; ZVFH-LABEL: vfsgnj_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfsgnj.vv v8, v8, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT:    vmset.m v24
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT:    addi a2, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB11_2:
; ZVFHMIN-NEXT:    addi a1, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}
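
; The f32 and f64 cases lower the same way with and without Zvfh, so a single
; set of CHECK lines covers all four RUN configurations.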

declare <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfsgnj_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfsgnj_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfsgnj_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfsgnj_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfsgnj_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfsgnj_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfsgnj_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfsgnj_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfsgnj_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfsgnj_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfsgnj_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfsgnj_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfsgnj_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfsgnj_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfsgnj_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfsgnj_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfsgnj_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfsgnj_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfsgnj.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}