1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mattr=+sve < %s | FileCheck %s
4 target triple = "aarch64-unknown-linux-gnu"
; ASR (vector shift by vector, predicated): check that the .u ("undef inactive
; lanes") form of the SVE asr intrinsic lowers to a single merging predicated
; ASR instruction for each legal element type (.b/.h/.s/.d).
; NOTE(review): this view of the autogenerated file appears truncated -- the
; closing braces and some CHECK-NEXT lines (e.g. "ret") between tests are not
; visible here. Confirm against the checked-in file before editing.
10 define <vscale x 16 x i8> @asr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
11 ; CHECK-LABEL: asr_i8:
13 ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b
15 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
16 <vscale x 16 x i8> %a,
17 <vscale x 16 x i8> %b)
18 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
21 define <vscale x 8 x i16> @asr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
22 ; CHECK-LABEL: asr_i16:
24 ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h
26 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
27 <vscale x 8 x i16> %a,
28 <vscale x 8 x i16> %b)
29 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
32 define <vscale x 4 x i32> @asr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
33 ; CHECK-LABEL: asr_i32:
35 ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s
37 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
38 <vscale x 4 x i32> %a,
39 <vscale x 4 x i32> %b)
40 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
43 define <vscale x 2 x i64> @asr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
44 ; CHECK-LABEL: asr_i64:
46 ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d
48 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
49 <vscale x 2 x i64> %a,
50 <vscale x 2 x i64> %b)
51 ret <vscale x 2 x i64> %out
; ASR (shift by immediate): when the shift operand is a splat of a constant in
; the legal immediate range, the .u intrinsic should lower to the unpredicated
; immediate form of ASR (no predicate operand in the emitted instruction).
32 define <vscale x 16 x i8> @asr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
33 ; CHECK-LABEL: asr_imm_i8:
35 ; CHECK-NEXT: asr z0.b, z0.b, #3
; Build a splat of the immediate via insertelement + shufflevector.
37 %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
38 %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
39 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> %pg,
40 <vscale x 16 x i8> %a,
41 <vscale x 16 x i8> %imm.splat)
42 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
45 define <vscale x 8 x i16> @asr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
46 ; CHECK-LABEL: asr_imm_i16:
48 ; CHECK-NEXT: asr z0.h, z0.h, #4
50 %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
51 %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
52 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> %pg,
53 <vscale x 8 x i16> %a,
54 <vscale x 8 x i16> %imm.splat)
55 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
58 define <vscale x 4 x i32> @asr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
59 ; CHECK-LABEL: asr_imm_i32:
61 ; CHECK-NEXT: asr z0.s, z0.s, #5
63 %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
64 %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
65 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> %pg,
66 <vscale x 4 x i32> %a,
67 <vscale x 4 x i32> %imm.splat)
68 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
71 define <vscale x 2 x i64> @asr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
72 ; CHECK-LABEL: asr_imm_i64:
74 ; CHECK-NEXT: asr z0.d, z0.d, #6
76 %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
77 %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
78 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> %pg,
79 <vscale x 2 x i64> %a,
80 <vscale x 2 x i64> %imm.splat)
81 ret <vscale x 2 x i64> %out
; LSL (vector shift by vector, predicated): check that the .u form of the SVE
; lsl intrinsic lowers to a single merging predicated LSL instruction for each
; legal element type (.b/.h/.s/.d).
114 define <vscale x 16 x i8> @lsl_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
115 ; CHECK-LABEL: lsl_i8:
117 ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b
119 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
120 <vscale x 16 x i8> %a,
121 <vscale x 16 x i8> %b)
122 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
125 define <vscale x 8 x i16> @lsl_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
126 ; CHECK-LABEL: lsl_i16:
128 ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h
130 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
131 <vscale x 8 x i16> %a,
132 <vscale x 8 x i16> %b)
133 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
136 define <vscale x 4 x i32> @lsl_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
137 ; CHECK-LABEL: lsl_i32:
139 ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s
141 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
142 <vscale x 4 x i32> %a,
143 <vscale x 4 x i32> %b)
144 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
147 define <vscale x 2 x i64> @lsl_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
148 ; CHECK-LABEL: lsl_i64:
150 ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d
152 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
153 <vscale x 2 x i64> %a,
154 <vscale x 2 x i64> %b)
155 ret <vscale x 2 x i64> %out
; LSL (shift by immediate): a splat-constant shift amount in the legal range
; should select the unpredicated immediate form of LSL. The immediates used
; here are the per-element-size maxima for LSL (elt_bits - 1).
162 define <vscale x 16 x i8> @lsl_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
163 ; CHECK-LABEL: lsl_imm_i8:
165 ; CHECK-NEXT: lsl z0.b, z0.b, #7
; Build a splat of the immediate via insertelement + shufflevector.
167 %imm = insertelement <vscale x 16 x i8> undef, i8 7, i32 0
168 %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
169 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> %pg,
170 <vscale x 16 x i8> %a,
171 <vscale x 16 x i8> %imm.splat)
172 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
175 define <vscale x 8 x i16> @lsl_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
176 ; CHECK-LABEL: lsl_imm_i16:
178 ; CHECK-NEXT: lsl z0.h, z0.h, #8
180 %imm = insertelement <vscale x 8 x i16> undef, i16 8, i32 0
181 %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
182 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> %pg,
183 <vscale x 8 x i16> %a,
184 <vscale x 8 x i16> %imm.splat)
185 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
188 define <vscale x 4 x i32> @lsl_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
189 ; CHECK-LABEL: lsl_imm_i32:
191 ; CHECK-NEXT: lsl z0.s, z0.s, #9
193 %imm = insertelement <vscale x 4 x i32> undef, i32 9, i32 0
194 %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
195 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> %pg,
196 <vscale x 4 x i32> %a,
197 <vscale x 4 x i32> %imm.splat)
198 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
201 define <vscale x 2 x i64> @lsl_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
202 ; CHECK-LABEL: lsl_imm_i64:
204 ; CHECK-NEXT: lsl z0.d, z0.d, #10
206 %imm = insertelement <vscale x 2 x i64> undef, i64 10, i32 0
207 %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
208 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> %pg,
209 <vscale x 2 x i64> %a,
210 <vscale x 2 x i64> %imm.splat)
211 ret <vscale x 2 x i64> %out
; LSR (vector shift by vector, predicated): check that the .u form of the SVE
; lsr intrinsic lowers to a single merging predicated LSR instruction for each
; legal element type (.b/.h/.s/.d).
218 define <vscale x 16 x i8> @lsr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
219 ; CHECK-LABEL: lsr_i8:
221 ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b
223 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
224 <vscale x 16 x i8> %a,
225 <vscale x 16 x i8> %b)
226 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
229 define <vscale x 8 x i16> @lsr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
230 ; CHECK-LABEL: lsr_i16:
232 ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h
234 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
235 <vscale x 8 x i16> %a,
236 <vscale x 8 x i16> %b)
237 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
240 define <vscale x 4 x i32> @lsr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
241 ; CHECK-LABEL: lsr_i32:
243 ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s
245 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
246 <vscale x 4 x i32> %a,
247 <vscale x 4 x i32> %b)
248 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
251 define <vscale x 2 x i64> @lsr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
252 ; CHECK-LABEL: lsr_i64:
254 ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
256 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
257 <vscale x 2 x i64> %a,
258 <vscale x 2 x i64> %b)
259 ret <vscale x 2 x i64> %out
; LSR (shift by immediate): a splat-constant shift amount in the legal range
; should select the unpredicated immediate form of LSR. Note the i8 case uses
; #8, the maximum legal right-shift immediate for byte elements.
266 define <vscale x 16 x i8> @lsr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
267 ; CHECK-LABEL: lsr_imm_i8:
269 ; CHECK-NEXT: lsr z0.b, z0.b, #8
; Build a splat of the immediate via insertelement + shufflevector.
271 %imm = insertelement <vscale x 16 x i8> undef, i8 8, i32 0
272 %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
273 %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> %pg,
274 <vscale x 16 x i8> %a,
275 <vscale x 16 x i8> %imm.splat)
276 ret <vscale x 16 x i8> %out
; Same check for 16-bit (.h) elements.
279 define <vscale x 8 x i16> @lsr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
280 ; CHECK-LABEL: lsr_imm_i16:
282 ; CHECK-NEXT: lsr z0.h, z0.h, #12
284 %imm = insertelement <vscale x 8 x i16> undef, i16 12, i32 0
285 %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
286 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> %pg,
287 <vscale x 8 x i16> %a,
288 <vscale x 8 x i16> %imm.splat)
289 ret <vscale x 8 x i16> %out
; Same check for 32-bit (.s) elements.
292 define <vscale x 4 x i32> @lsr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
293 ; CHECK-LABEL: lsr_imm_i32:
295 ; CHECK-NEXT: lsr z0.s, z0.s, #13
297 %imm = insertelement <vscale x 4 x i32> undef, i32 13, i32 0
298 %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
299 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> %pg,
300 <vscale x 4 x i32> %a,
301 <vscale x 4 x i32> %imm.splat)
302 ret <vscale x 4 x i32> %out
; Same check for 64-bit (.d) elements.
305 define <vscale x 2 x i64> @lsr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
306 ; CHECK-LABEL: lsr_imm_i64:
308 ; CHECK-NEXT: lsr z0.d, z0.d, #14
310 %imm = insertelement <vscale x 2 x i64> undef, i64 14, i32 0
311 %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
312 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> %pg,
313 <vscale x 2 x i64> %a,
314 <vscale x 2 x i64> %imm.splat)
315 ret <vscale x 2 x i64> %out
; Declarations of the SVE shift intrinsics exercised above, one per shift kind
; (asr/lsl/lsr) and element type. All take (predicate, value, shift-amount).
318 declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
319 declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
320 declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
321 declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
323 declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
324 declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
325 declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
326 declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
328 declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
329 declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
330 declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
331 declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)