; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -mattr=+sve < %s -o - | FileCheck --check-prefix=SVE %s
; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s -o - | FileCheck --check-prefix=SVE2 %s
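
; The functions below implement the pattern op0 ^ (~op1 & op2). With +sve2 this
; folds to a single BCAX (Bitwise Clear and Exclusive OR) instruction, which
; computes Zdn = Zdn ^ (Zm & ~Zk); with plain +sve it lowers to a BIC/EOR pair.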
define <vscale x 2 x i64> @bcax_nxv2i64_1(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %2
  %6 = xor <vscale x 2 x i64> %5, %0
  ret <vscale x 2 x i64> %6
}

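; In the _2 variants the accumulator arrives in z2 but the result must be
; returned in z0; since BCAX is destructive (it overwrites its accumulator
; operand), codegen computes into z2 and needs an extra mov into z0.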
define <vscale x 2 x i64> @bcax_nxv2i64_2(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2) {
; SVE-LABEL: bcax_nxv2i64_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv2i64_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 2 x i64> %1, splat (i64 -1)
  %5 = and <vscale x 2 x i64> %4, %0
  %6 = xor <vscale x 2 x i64> %5, %2
  ret <vscale x 2 x i64> %6
}

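; BIC, EOR and BCAX are bitwise operations, so the remaining element widths
; (i32, i16, i8) exercise the same lowering; the unpredicated forms are always
; written with the .d element size regardless of the IR element type.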
define <vscale x 4 x i32> @bcax_nxv4i32_1(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %2
  %6 = xor <vscale x 4 x i32> %5, %0
  ret <vscale x 4 x i32> %6
}

define <vscale x 4 x i32> @bcax_nxv4i32_2(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2) {
; SVE-LABEL: bcax_nxv4i32_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv4i32_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 4 x i32> %1, splat (i32 -1)
  %5 = and <vscale x 4 x i32> %4, %0
  %6 = xor <vscale x 4 x i32> %5, %2
  ret <vscale x 4 x i32> %6
}

define <vscale x 8 x i16> @bcax_nxv8i16_1(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %2
  %6 = xor <vscale x 8 x i16> %5, %0
  ret <vscale x 8 x i16> %6
}

define <vscale x 8 x i16> @bcax_nxv8i16_2(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2) {
; SVE-LABEL: bcax_nxv8i16_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv8i16_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 8 x i16> %1, splat (i16 -1)
  %5 = and <vscale x 8 x i16> %4, %0
  %6 = xor <vscale x 8 x i16> %5, %2
  ret <vscale x 8 x i16> %6
}

define <vscale x 16 x i8> @bcax_nxv16i8_1(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_1:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z1.d, z2.d, z1.d
; SVE-NEXT:    eor z0.d, z1.d, z0.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_1:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z0.d, z0.d, z2.d, z1.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %2
  %6 = xor <vscale x 16 x i8> %5, %0
  ret <vscale x 16 x i8> %6
}

define <vscale x 16 x i8> @bcax_nxv16i8_2(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2) {
; SVE-LABEL: bcax_nxv16i8_2:
; SVE:       // %bb.0:
; SVE-NEXT:    bic z0.d, z0.d, z1.d
; SVE-NEXT:    eor z0.d, z0.d, z2.d
; SVE-NEXT:    ret
;
; SVE2-LABEL: bcax_nxv16i8_2:
; SVE2:       // %bb.0:
; SVE2-NEXT:    bcax z2.d, z2.d, z0.d, z1.d
; SVE2-NEXT:    mov z0.d, z2.d
; SVE2-NEXT:    ret
  %4 = xor <vscale x 16 x i8> %1, splat (i8 -1)
  %5 = and <vscale x 16 x i8> %4, %0
  %6 = xor <vscale x 16 x i8> %5, %2
  ret <vscale x 16 x i8> %6
}