; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
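
; The .u intrinsic variants take a predicate but leave the inactive lanes
; undefined, so codegen is free to select the unpredicated instruction
; forms. Bitwise operations are element-size agnostic, hence the vector
; forms below use the .d variant regardless of element type.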

;
; AND
;

define <vscale x 16 x i8> @and_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: and_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @and_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: and_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @and_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: and_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @and_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: and_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; AND (immediate)
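;
; Logical immediates are encoded per element size, so these checks expect
; the element-sized form of the instruction (e.g. and z0.b, z0.b, #0x3).
;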

define <vscale x 16 x i8> @and_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: and_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.b, z0.b, #0x3
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 3))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @and_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: and_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.h, z0.h, #0x4
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 4))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @and_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: and_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0x10
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 16))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @and_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: and_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, #0x20
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 32))
  ret <vscale x 2 x i64> %out
}

;
; EOR
;

define <vscale x 16 x i8> @eor_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: eor_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @eor_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: eor_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @eor_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: eor_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @eor_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: eor_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; EOR (immediate)
;

define <vscale x 16 x i8> @eor_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: eor_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.b, z0.b, #0x7
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 7))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @eor_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: eor_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.h, z0.h, #0x8
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 8))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @eor_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: eor_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.s, z0.s, #0x10
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 16))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @eor_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: eor_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor z0.d, z0.d, #0x20
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 32))
  ret <vscale x 2 x i64> %out
}

;
; ORR
;

define <vscale x 16 x i8> @orr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: orr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @orr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: orr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @orr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: orr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @orr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: orr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; ORR (immediate)
;

define <vscale x 16 x i8> @orr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: orr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.b, z0.b, #0x8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 8))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @orr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: orr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.h, z0.h, #0xc
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 12))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @orr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: orr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.s, z0.s, #0x10
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 16))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @orr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: orr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr z0.d, z0.d, #0x20
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 32))
  ret <vscale x 2 x i64> %out
}

;
; BIC
;

define <vscale x 16 x i8> @bic_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: bic_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bic z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @bic_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: bic_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bic z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @bic_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: bic_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bic z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @bic_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: bic_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bic z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; BIC (immediate)
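;
; SVE has no BIC (immediate) instruction, so a BIC with a splat-constant
; operand is expected to be selected as AND (immediate) with the inverted
; mask (e.g. clearing 0x7 becomes and z0.b, z0.b, #0xf8).
;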

define <vscale x 16 x i8> @bic_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: bic_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.b, z0.b, #0xf8
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 7))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @bic_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: bic_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.h, z0.h, #0xfff7
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 8))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @bic_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: bic_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.s, z0.s, #0xffffffef
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 16))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @bic_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: bic_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z0.d, z0.d, #0xffffffffffffffdf
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 32))
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.and.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.and.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.and.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.and.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.eor.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.eor.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.eor.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.eor.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.orr.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.orr.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.orr.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.orr.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.bic.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.bic.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.bic.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.bic.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)