; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

declare <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @vfabs_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %v
}

define <vscale x 1 x half> @vfabs_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv1f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv1f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 1 x half> @llvm.vp.fabs.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x half> %v
}

declare <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)

define <vscale x 2 x half> @vfabs_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %v
}

define <vscale x 2 x half> @vfabs_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv2f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv2f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 2 x half> @llvm.vp.fabs.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x half> %v
}

declare <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @vfabs_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %v
}

define <vscale x 4 x half> @vfabs_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv4f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv4f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 4 x half> @llvm.vp.fabs.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x half> %v
}

declare <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @vfabs_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %v
}

define <vscale x 8 x half> @vfabs_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv8f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv8f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 8 x half> @llvm.vp.fabs.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x half> %v
}

declare <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)

define <vscale x 16 x half> @vfabs_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x half> %v
}

define <vscale x 16 x half> @vfabs_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv16f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv16f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 16 x half> @llvm.vp.fabs.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x half> %v
}

declare <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half>, <vscale x 32 x i1>, i32)

define <vscale x 32 x half> @vfabs_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1, v0.t
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @vfabs_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vfabs_vv_nxv32f16_unmasked:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT:    vfabs.v v8, v8
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfabs_vv_nxv32f16_unmasked:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a1, 8
; ZVFHMIN-NEXT:    addi a1, a1, -1
; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
; ZVFHMIN-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vp.fabs.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x half> %v
}

declare <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @vfabs_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %v
}

define <vscale x 1 x float> @vfabs_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x float> @llvm.vp.fabs.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x float> %v
}

declare <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @vfabs_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @vfabs_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.vp.fabs.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x float> %v
}

declare <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @vfabs_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %v
}

define <vscale x 4 x float> @vfabs_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x float> @llvm.vp.fabs.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x float> %v
}

declare <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @vfabs_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %v
}

define <vscale x 8 x float> @vfabs_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x float> @llvm.vp.fabs.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x float> %v
}

declare <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)

define <vscale x 16 x float> @vfabs_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x float> %v
}

define <vscale x 16 x float> @vfabs_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x float> @llvm.vp.fabs.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x float> %v
}

declare <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @vfabs_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @vfabs_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv1f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.vp.fabs.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x double> %v
}

declare <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @vfabs_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}

define <vscale x 2 x double> @vfabs_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv2f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x double> @llvm.vp.fabs.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x double> %v
}

declare <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @vfabs_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %v
}

define <vscale x 4 x double> @vfabs_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv4f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x double> @llvm.vp.fabs.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x double> %v
}

declare <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)

define <vscale x 7 x double> @vfabs_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv7f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x double> %v
}

define <vscale x 7 x double> @vfabs_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv7f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x double> @llvm.vp.fabs.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x double> %v
}

declare <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @vfabs_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %v
}

define <vscale x 8 x double> @vfabs_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv8f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x double> @llvm.vp.fabs.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x double> %v
}

; Test splitting.
declare <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)

define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16, v0.t
; CHECK-NEXT:    bltu a0, a1, .LBB32_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB32_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x double> %v
}

define <vscale x 16 x double> @vfabs_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_nxv16f64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v16, v16
; CHECK-NEXT:    bltu a0, a1, .LBB33_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB33_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfabs.v v8, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x double> %v
}