; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
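
; With Zvfh, f16 vector arithmetic is native, so fneg selects directly to a
; single vfneg.v. Zvfhmin only provides the f16<->f32 conversion instructions,
; so each f16 fneg is widened: vfwcvt.f.f.v to f32, vfneg.v at the doubled
; SEW/LMUL, then vfncvt.f.f.w back to f16. The CHECK lines below reflect this.
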
define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 1 x half> %va
  ret <vscale x 1 x half> %vb
}

define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v9, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 2 x half> %va
  ret <vscale x 2 x half> %vb
}

define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v10, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 4 x half> %va
  ret <vscale x 4 x half> %vb
}

define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v12, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 8 x half> %va
  ret <vscale x 8 x half> %vb
}

define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 16 x half> %va
  ret <vscale x 16 x half> %vb
}

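; For nxv32f16, widening the whole vector to f32 would require a register
; group larger than the maximum LMUL of 8, so the Zvfhmin lowering processes
; the vector in two m4 halves (v8 and v12), widening and negating each half
; separately.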
define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfneg.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfneg.v v16, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    ret
  %vb = fneg <vscale x 32 x half> %va
  ret <vscale x 32 x half> %vb
}

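; Single-width f32 and f64 vector fneg is legal with +v and +d in both
; configurations, so the remaining tests share the common CHECK prefix.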
define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 1 x float> %va
  ret <vscale x 1 x float> %vb
}

define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 2 x float> %va
  ret <vscale x 2 x float> %vb
}

define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 4 x float> %va
  ret <vscale x 4 x float> %vb
}

define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 8 x float> %va
  ret <vscale x 8 x float> %vb
}

define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 16 x float> %va
  ret <vscale x 16 x float> %vb
}

define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 1 x double> %va
  ret <vscale x 1 x double> %vb
}

define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 2 x double> %va
  ret <vscale x 2 x double> %vb
}

define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 4 x double> %va
  ret <vscale x 4 x double> %vb
}

define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
  %vb = fneg <vscale x 8 x double> %va
  ret <vscale x 8 x double> %vb
}