; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
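
; Plain vector add selects the unpredicated SVE ADD form; an add of
; zeroinitializer is folded away entirely.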
define <vscale x 2 x i64> @add_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = add <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @add_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = add <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @add_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = add <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @add_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = add <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i8> %a) {
; CHECK-LABEL: add_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %res = add <vscale x 16 x i8> %a, zeroinitializer
  ret <vscale x 16 x i8> %res
}

define <vscale x 1 x i32> @add_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
; CHECK-LABEL: add_nxv1i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = add <vscale x 1 x i32> %a, %b
  ret <vscale x 1 x i32> %c
}
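
; Plain vector sub likewise selects the unpredicated SVE SUB; a sub of
; zeroinitializer folds away.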
define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = sub <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = sub <vscale x 4 x i32> %a, %b
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = sub <vscale x 8 x i16> %a, %b
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = sub <vscale x 16 x i8> %a, %b
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sub_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %res = sub <vscale x 16 x i8> %a, zeroinitializer
  ret <vscale x 16 x i8> %res
}
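
; llvm.abs needs a governing predicate, so a PTRUE is materialised. Illegal
; types are handled too: sub-width elements are sign-extended first, and wide
; vectors are split across multiple Z registers.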
define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i8> %a) {
; CHECK-LABEL: abs_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    abs z0.b, p0/m, z0.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %a, i1 false)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @abs_nxv8i16(<vscale x 8 x i16> %a) {
; CHECK-LABEL: abs_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    abs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %a, i1 false)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @abs_nxv4i32(<vscale x 4 x i32> %a) {
; CHECK-LABEL: abs_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    abs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %a, i1 false)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @abs_nxv2i64(<vscale x 2 x i64> %a) {
; CHECK-LABEL: abs_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %a, i1 false)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i16> @abs_nxv4i16(<vscale x 4 x i16> %a) {
; CHECK-LABEL: abs_nxv4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
; CHECK-NEXT:    abs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %a, i1 false)
  ret <vscale x 4 x i16> %res
}

define <vscale x 32 x i8> @abs_nxv32i8(<vscale x 32 x i8> %a) {
; CHECK-LABEL: abs_nxv32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    abs z0.b, p0/m, z0.b
; CHECK-NEXT:    abs z1.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8> %a, i1 false)
  ret <vscale x 32 x i8> %res
}

define <vscale x 8 x i64> @abs_nxv8i64(<vscale x 8 x i64> %a) {
; CHECK-LABEL: abs_nxv8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.d, p0/m, z0.d
; CHECK-NEXT:    abs z1.d, p0/m, z1.d
; CHECK-NEXT:    abs z2.d, p0/m, z2.d
; CHECK-NEXT:    abs z3.d, p0/m, z3.d
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %a, i1 false)
  ret <vscale x 8 x i64> %res
}
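
; llvm.sadd.sat selects the unpredicated SQADD; a saturating add of zero folds
; to a plain return.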
define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqadd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqadd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @sqadd_i32_zero(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer)
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqadd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sqadd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
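
; llvm.ssub.sat selects the unpredicated SQSUB; a saturating sub of zero folds
; away.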
define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqsub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @sqsub_i64_zero(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqsub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqsub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sqsub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
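
; llvm.uadd.sat selects the unpredicated UQADD.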
define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqadd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqadd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqadd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uqadd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
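
; llvm.usub.sat selects the unpredicated UQSUB.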
define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqsub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqsub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %res
}

define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqsub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uqsub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %res
}
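
; A mul feeding an add/sub is combined into the predicated MLA/MLS; if the mul
; result has another use, it is kept as a separate MUL instead.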
define <vscale x 16 x i8> @mla_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: mla_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    mla z2.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    ret
  %prod = mul <vscale x 16 x i8> %a, %b
  %res = add <vscale x 16 x i8> %c, %prod
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @mla_i8_multiuse(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i8>* %p) {
; CHECK-LABEL: mla_i8_multiuse:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    mul z1.b, p0/m, z1.b, z0.b
; CHECK-NEXT:    add z0.b, z2.b, z1.b
; CHECK-NEXT:    st1b { z1.b }, p0, [x0]
; CHECK-NEXT:    ret
  %prod = mul <vscale x 16 x i8> %a, %b
  store <vscale x 16 x i8> %prod, <vscale x 16 x i8>* %p
  %res = add <vscale x 16 x i8> %c, %prod
  ret <vscale x 16 x i8> %res
}

define <vscale x 16 x i8> @mls_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: mls_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    mls z2.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    mov z0.d, z2.d
; CHECK-NEXT:    ret
  %prod = mul <vscale x 16 x i8> %a, %b
  %res = sub <vscale x 16 x i8> %c, %prod
  ret <vscale x 16 x i8> %res
}

declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8>, i1)
declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)
declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)