; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

declare <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v10, v10, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v10, v10
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v12, v12, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v12, v12
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vmv1r.v v16, v0
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v24, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB10_2:
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vmv1r.v v0, v16
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v24, v0.t
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfneg_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    csrr a2, vlenb
; ZVFHMIN-NEXT:    slli a1, a2, 1
; ZVFHMIN-NEXT:    sub a3, a0, a1
; ZVFHMIN-NEXT:    sltu a4, a0, a3
; ZVFHMIN-NEXT:    addi a4, a4, -1
; ZVFHMIN-NEXT:    and a3, a4, a3
; ZVFHMIN-NEXT:    srli a2, a2, 2
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT:    vmset.m v16
; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16, v0.t
; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT:  # %bb.1:
; ZVFHMIN-NEXT:    mv a0, a1
; ZVFHMIN-NEXT:  .LBB11_2:
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fneg.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}

declare <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}

declare <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x double> @vfneg_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fneg.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}

; Test splitting.
declare <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB32_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB32_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v
}

define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_nxv16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x double> %v
}