; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -force-streaming -verify-machineinstrs < %s | FileCheck %s
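
;
; SQRSHR / UQRSHR (two-vector group): saturating rounding shift right and narrow
; two vectors of 32-bit elements into one vector of 16-bit elements.
;
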
define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqrshr z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    uqrshr z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}

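;
; SQRSHR / UQRSHR (four-vector group): saturating rounding shift right and narrow
; four source vectors into a single vector of quarter-width elements.
;
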
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshr z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshr z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqrshr z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqrshr z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

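;
; SQRSHRN / UQRSHRN (four-vector group): saturating rounding shift right, narrow
; and interleave the results from four source vectors.
;
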
define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_s8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshrn z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_s16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshrn z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

define <vscale x 16 x i8> @multi_vector_sat_shift_narrow_interleave_x4_u8(<vscale x 2 x i64> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqrshrn z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqrshrn z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

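;
; SQRSHRU (two-vector group): signed input, unsigned saturated narrow result.
;
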
define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqrshru z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}

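;
; SQRSHRU (four-vector group).
;
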
define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshru z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshru z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

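;
; SQRSHRUN (four-vector group): signed input, unsigned saturated narrow result,
; interleaved.
;
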
define <vscale x 16 x i8> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshrun z0.b, { z4.s - z7.s }, #32
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, i32 32)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x4_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqrshrun z0.h, { z4.d - z7.d }, #64
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, i32 64)
  ret <vscale x 8 x i16> %res
}

declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshr.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshr.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrn.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshru.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshru.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrun.x4.nxv16i8(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x4.nxv8i16(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)