; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple aarch64-unknown-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; SABA from ADD(ABS(SUB NSW))
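; Each test checks that add(%a, abs(sub nsw(%b, %c))) is combined into a single
; SABA (signed absolute difference and accumulate) instruction.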

define <vscale x 2 x i64> @saba_abs_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_abs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 2 x i64> %b, %c
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  %add = add <vscale x 2 x i64> %a, %abs
  ret <vscale x 2 x i64> %add
}

define <vscale x 4 x i32> @saba_abs_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_abs_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 4 x i32> %b, %c
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  %add = add <vscale x 4 x i32> %a, %abs
  ret <vscale x 4 x i32> %add
}

define <vscale x 8 x i16> @saba_abs_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_abs_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 8 x i16> %b, %c
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  %add = add <vscale x 8 x i16> %a, %abs
  ret <vscale x 8 x i16> %add
}

define <vscale x 16 x i8> @saba_abs_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_abs_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %sub = sub nsw <vscale x 16 x i8> %b, %c
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  %add = add <vscale x 16 x i8> %a, %abs
  ret <vscale x 16 x i8> %add
}
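
; SABA from ADD(SABD)
; Same fold, but with the absolute difference coming from the
; @llvm.aarch64.sve.sabd.u intrinsic under an all-active predicate (ptrue 31).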

define <vscale x 2 x i64> @saba_sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_sabd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %true = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %sabd = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1> %true, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %add = add <vscale x 2 x i64> %sabd, %a
  ret <vscale x 2 x i64> %add
}

define <vscale x 4 x i32> @saba_sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_sabd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %true = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %sabd = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1> %true, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %add = add <vscale x 4 x i32> %sabd, %a
  ret <vscale x 4 x i32> %add
}

define <vscale x 8 x i16> @saba_sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_sabd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %true = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %sabd = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1> %true, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %add = add <vscale x 8 x i16> %sabd, %a
  ret <vscale x 8 x i16> %add
}

define <vscale x 16 x i8> @saba_sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_sabd_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %true = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %sabd = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1> %true, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %add = add <vscale x 16 x i8> %sabd, %a
  ret <vscale x 16 x i8> %add
}

declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)

declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)

declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)

attributes #0 = { "target-features"="+sve2" }