; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
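
; Each vp.abs below is expected to lower to a vrsub.vi (negation, masked where
; applicable) followed by a vmax.vv that selects the larger of the operand and
; its negation, as asserted by the CHECK lines.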
declare <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
declare <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
declare <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
declare <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
declare <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8>, i1 immarg, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
declare <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8>, i1 immarg, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
declare <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
declare <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
declare <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
declare <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
declare <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16>, i1 immarg, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
declare <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
declare <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}
declare <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}
declare <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}
declare <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
declare <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64>, i1 immarg, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v9, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}
declare <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64>, i1 immarg, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v10, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}
declare <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64>, i1 immarg, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v12, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}
declare <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64>, i1 immarg, <vscale x 7 x i1>, i32)

define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv7i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i64> %v
}

define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv7i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 7 x i64> %v
}
declare <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64>, i1 immarg, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}
declare <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64>, i1 immarg, <vscale x 16 x i1>, i32)

define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 4
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    srli a2, a1, 3
; CHECK-NEXT:    sub a3, a0, a1
; CHECK-NEXT:    vslidedown.vx v0, v0, a2
; CHECK-NEXT:    sltu a2, a0, a3
; CHECK-NEXT:    addi a2, a2, -1
; CHECK-NEXT:    and a2, a2, a3
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v16, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v16, v8, v0.t
; CHECK-NEXT:    addi a2, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT:    bltu a0, a1, .LBB46_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB46_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    .cfi_def_cfa sp, 16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    .cfi_def_cfa_offset 0
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i64> %v
}

define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_nxv16i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    sub a2, a0, a1
; CHECK-NEXT:    sltu a3, a0, a2
; CHECK-NEXT:    addi a3, a3, -1
; CHECK-NEXT:    and a2, a3, a2
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v16, 0
; CHECK-NEXT:    vmax.vv v16, v16, v24
; CHECK-NEXT:    bltu a0, a1, .LBB47_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a0, a1
; CHECK-NEXT:  .LBB47_2:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v24, v8, 0
; CHECK-NEXT:    vmax.vv v8, v8, v24
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i64> %v
}