; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)

define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i8> %v
}
declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1 immarg, <4 x i1>, i32)

define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i8> %v
}
declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1 immarg, <8 x i1>, i32)

define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i8> %v
}
declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1 immarg, <16 x i1>, i32)

define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i8> %v
}
declare <2 x i16> @llvm.vp.abs.v2i16(<2 x i16>, i1 immarg, <2 x i1>, i32)

define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i16> %v
}
declare <4 x i16> @llvm.vp.abs.v4i16(<4 x i16>, i1 immarg, <4 x i1>, i32)

define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i16> %v
}
declare <8 x i16> @llvm.vp.abs.v8i16(<8 x i16>, i1 immarg, <8 x i1>, i32)

define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i16> %v
}
declare <16 x i16> @llvm.vp.abs.v16i16(<16 x i16>, i1 immarg, <16 x i1>, i32)

define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i16> %v
}
declare <2 x i32> @llvm.vp.abs.v2i32(<2 x i32>, i1 immarg, <2 x i1>, i32)

define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i32> %v
}
declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32)

define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i32> %v
}
declare <8 x i32> @llvm.vp.abs.v8i32(<8 x i32>, i1 immarg, <8 x i1>, i32)

define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i32> %v
}
declare <16 x i32> @llvm.vp.abs.v16i32(<16 x i32>, i1 immarg, <16 x i1>, i32)

define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i32> %v
}
declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1 immarg, <2 x i1>, i32)

define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x i64> %v
}
declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1 immarg, <4 x i1>, i32)

define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
  ret <4 x i64> %v
}
declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1 immarg, <8 x i1>, i32)

define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
  ret <8 x i64> %v
}
declare <15 x i64> @llvm.vp.abs.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)

define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v15i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
  ret <15 x i64> %v
}

define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v15i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
  ret <15 x i64> %v
}
declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)

define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
  ret <16 x i64> %v
}
declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32)

define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v7, v0, 2
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB34_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB34_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v24, v0.t
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vmv1r.v v0, v7
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT:    vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
  ret <32 x i64> %v
}

define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 16
; CHECK-NEXT:    mv a1, a0
; CHECK-NEXT:    bltu a0, a2, .LBB35_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    li a1, 16
; CHECK-NEXT:  .LBB35_2:
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v24
; CHECK-NEXT:    addi a1, a0, -16
; CHECK-NEXT:    sltu a0, a0, a1
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    and a0, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0
; CHECK-NEXT:    vmax.vv v16, v16, v24
; CHECK-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
  ret <32 x i64> %v
}