1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
3 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
; Merging form of the brka intrinsic: checked to select BRKA with /m
; predication (result register carries %inactive in p0, predicate in p1).
9 define <vscale x 16 x i1> @brka_m_b8(<vscale x 16 x i1> %inactive, <vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
10 ; CHECK-LABEL: brka_m_b8:
12 ; CHECK-NEXT: brka p0.b, p1/m, p2.b
14 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brka.nxv16i1(<vscale x 16 x i1> %inactive,
15 <vscale x 16 x i1> %pg,
16 <vscale x 16 x i1> %a)
17 ret <vscale x 16 x i1> %out
; Zeroing form of the brka intrinsic: checked to select BRKA with /z
; predication, operating in place on p0.
20 define <vscale x 16 x i1> @brka_z_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
21 ; CHECK-LABEL: brka_z_b8:
23 ; CHECK-NEXT: brka p0.b, p0/z, p1.b
25 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brka.z.nxv16i1(<vscale x 16 x i1> %pg,
26 <vscale x 16 x i1> %a)
27 ret <vscale x 16 x i1> %out
; Merging form of the brkb intrinsic: checked to select BRKB with /m
; predication (mirrors the brka_m_b8 test above).
34 define <vscale x 16 x i1> @brkb_m_b8(<vscale x 16 x i1> %inactive, <vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
35 ; CHECK-LABEL: brkb_m_b8:
37 ; CHECK-NEXT: brkb p0.b, p1/m, p2.b
39 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkb.nxv16i1(<vscale x 16 x i1> %inactive,
40 <vscale x 16 x i1> %pg,
41 <vscale x 16 x i1> %a)
42 ret <vscale x 16 x i1> %out
; Zeroing form of the brkb intrinsic: checked to select BRKB with /z
; predication, operating in place on p0.
45 define <vscale x 16 x i1> @brkb_z_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
46 ; CHECK-LABEL: brkb_z_b8:
48 ; CHECK-NEXT: brkb p0.b, p0/z, p1.b
50 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkb.z.nxv16i1(<vscale x 16 x i1> %pg,
51 <vscale x 16 x i1> %a)
52 ret <vscale x 16 x i1> %out
; brkn intrinsic: BRKN is destructive in its second source, so the result is
; produced in p2 and then copied to the p0 return register with a MOV.
59 define <vscale x 16 x i1> @brkn_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
60 ; CHECK-LABEL: brkn_b8:
62 ; CHECK-NEXT: brkn p2.b, p0/z, p1.b, p2.b
63 ; CHECK-NEXT: mov p0.b, p2.b
65 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkn.z.nxv16i1(<vscale x 16 x i1> %pg,
66 <vscale x 16 x i1> %a,
67 <vscale x 16 x i1> %b)
68 ret <vscale x 16 x i1> %out
; brkpa intrinsic: checked to select BRKPA writing straight to the p0 return
; register (no extra MOV needed, unlike the brkn test).
75 define <vscale x 16 x i1> @brkpa_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
76 ; CHECK-LABEL: brkpa_b8:
78 ; CHECK-NEXT: brkpa p0.b, p0/z, p1.b, p2.b
80 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkpa.z.nxv16i1(<vscale x 16 x i1> %pg,
81 <vscale x 16 x i1> %a,
82 <vscale x 16 x i1> %b)
83 ret <vscale x 16 x i1> %out
; brkpb intrinsic: checked to select BRKPB writing straight to the p0 return
; register (mirrors the brkpa_b8 test above).
90 define <vscale x 16 x i1> @brkpb_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
91 ; CHECK-LABEL: brkpb_b8:
93 ; CHECK-NEXT: brkpb p0.b, p0/z, p1.b, p2.b
95 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.brkpb.z.nxv16i1(<vscale x 16 x i1> %pg,
96 <vscale x 16 x i1> %a,
97 <vscale x 16 x i1> %b)
98 ret <vscale x 16 x i1> %out
; pfirst intrinsic: PFIRST is destructive (source and destination tied), so
; the result is built in p1 and copied to the p0 return register with a MOV.
105 define <vscale x 16 x i1> @pfirst_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
106 ; CHECK-LABEL: pfirst_b8:
108 ; CHECK-NEXT: pfirst p1.b, p0, p1.b
109 ; CHECK-NEXT: mov p0.b, p1.b
111 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.pfirst.nxv16i1(<vscale x 16 x i1> %pg,
112 <vscale x 16 x i1> %a)
113 ret <vscale x 16 x i1> %out
; pnext intrinsic, byte elements: like pfirst, the destructive PNEXT result
; is produced in p1 and moved into the p0 return register.
120 define <vscale x 16 x i1> @pnext_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
121 ; CHECK-LABEL: pnext_b8:
123 ; CHECK-NEXT: pnext p1.b, p0, p1.b
124 ; CHECK-NEXT: mov p0.b, p1.b
126 %out = call <vscale x 16 x i1> @llvm.aarch64.sve.pnext.nxv16i1(<vscale x 16 x i1> %pg,
127 <vscale x 16 x i1> %a)
128 ret <vscale x 16 x i1> %out
; pnext intrinsic, halfword elements (nxv8i1): expects the .h element size
; on the PNEXT itself; the predicate-to-predicate MOV is always .b.
131 define <vscale x 8 x i1> @pnext_b16(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %a) {
132 ; CHECK-LABEL: pnext_b16:
134 ; CHECK-NEXT: pnext p1.h, p0, p1.h
135 ; CHECK-NEXT: mov p0.b, p1.b
137 %out = call <vscale x 8 x i1> @llvm.aarch64.sve.pnext.nxv8i1(<vscale x 8 x i1> %pg,
138 <vscale x 8 x i1> %a)
139 ret <vscale x 8 x i1> %out
; pnext intrinsic, word elements (nxv4i1): expects the .s element size on
; the PNEXT itself; the predicate-to-predicate MOV is always .b.
142 define <vscale x 4 x i1> @pnext_b32(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %a) {
143 ; CHECK-LABEL: pnext_b32:
145 ; CHECK-NEXT: pnext p1.s, p0, p1.s
146 ; CHECK-NEXT: mov p0.b, p1.b
148 %out = call <vscale x 4 x i1> @llvm.aarch64.sve.pnext.nxv4i1(<vscale x 4 x i1> %pg,
149 <vscale x 4 x i1> %a)
150 ret <vscale x 4 x i1> %out
; pnext intrinsic, doubleword elements (nxv2i1): expects the .d element size
; on the PNEXT itself; the predicate-to-predicate MOV is always .b.
153 define <vscale x 2 x i1> @pnext_b64(<vscale x 2 x i1> %pg, <vscale x 2 x i1> %a) {
154 ; CHECK-LABEL: pnext_b64:
156 ; CHECK-NEXT: pnext p1.d, p0, p1.d
157 ; CHECK-NEXT: mov p0.b, p1.b
159 %out = call <vscale x 2 x i1> @llvm.aarch64.sve.pnext.nxv2i1(<vscale x 2 x i1> %pg,
160 <vscale x 2 x i1> %a)
161 ret <vscale x 2 x i1> %out
; punpkhi nxv16i1 -> nxv8i1: unpack the high half of a b-sized predicate.
168 define <vscale x 8 x i1> @punpkhi_b16(<vscale x 16 x i1> %a) {
169 ; CHECK-LABEL: punpkhi_b16:
171 ; CHECK-NEXT: punpkhi p0.h, p0.b
173 %res = call <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1> %a)
174 ret <vscale x 8 x i1> %res
; punpkhi nxv8i1 -> nxv4i1: the same `punpkhi p0.h, p0.b` encoding is
; expected for every unpacked width (as in the other punpkhi tests here).
177 define <vscale x 4 x i1> @punpkhi_b8(<vscale x 8 x i1> %a) {
178 ; CHECK-LABEL: punpkhi_b8:
180 ; CHECK-NEXT: punpkhi p0.h, p0.b
182 %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1> %a)
183 ret <vscale x 4 x i1> %res
; punpkhi nxv4i1 -> nxv2i1: narrowest unpack tested; same encoding expected.
186 define <vscale x 2 x i1> @punpkhi_b4(<vscale x 4 x i1> %a) {
187 ; CHECK-LABEL: punpkhi_b4:
189 ; CHECK-NEXT: punpkhi p0.h, p0.b
191 %res = call <vscale x 2 x i1> @llvm.aarch64.sve.punpkhi.nxv2i1(<vscale x 4 x i1> %a)
192 ret <vscale x 2 x i1> %res
; punpklo nxv16i1 -> nxv8i1: unpack the low half of a b-sized predicate.
199 define <vscale x 8 x i1> @punpklo_b16(<vscale x 16 x i1> %a) {
200 ; CHECK-LABEL: punpklo_b16:
202 ; CHECK-NEXT: punpklo p0.h, p0.b
204 %res = call <vscale x 8 x i1> @llvm.aarch64.sve.punpklo.nxv8i1(<vscale x 16 x i1> %a)
205 ret <vscale x 8 x i1> %res
; punpklo nxv8i1 -> nxv4i1: the same `punpklo p0.h, p0.b` encoding is
; expected for every unpacked width (as in the other punpklo tests here).
208 define <vscale x 4 x i1> @punpklo_b8(<vscale x 8 x i1> %a) {
209 ; CHECK-LABEL: punpklo_b8:
211 ; CHECK-NEXT: punpklo p0.h, p0.b
213 %res = call <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1> %a)
214 ret <vscale x 4 x i1> %res
; punpklo nxv4i1 -> nxv2i1: narrowest unpack tested; same encoding expected.
217 define <vscale x 2 x i1> @punpklo_b4(<vscale x 4 x i1> %a) {
218 ; CHECK-LABEL: punpklo_b4:
220 ; CHECK-NEXT: punpklo p0.h, p0.b
222 %res = call <vscale x 2 x i1> @llvm.aarch64.sve.punpklo.nxv2i1(<vscale x 4 x i1> %a)
223 ret <vscale x 2 x i1> %res
; Declarations for the SVE predicate intrinsics exercised above.
; The merging brk forms take an extra leading <vscale x 16 x i1> %inactive
; operand; the .z (zeroing) forms do not.
226 declare <vscale x 16 x i1> @llvm.aarch64.sve.brka.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
227 declare <vscale x 16 x i1> @llvm.aarch64.sve.brka.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
228 declare <vscale x 16 x i1> @llvm.aarch64.sve.brkb.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
229 declare <vscale x 16 x i1> @llvm.aarch64.sve.brkb.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
230 declare <vscale x 16 x i1> @llvm.aarch64.sve.brkn.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
231 declare <vscale x 16 x i1> @llvm.aarch64.sve.brkpa.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
232 declare <vscale x 16 x i1> @llvm.aarch64.sve.brkpb.z.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>)
; pfirst is only defined for byte-granularity predicates here.
234 declare <vscale x 16 x i1> @llvm.aarch64.sve.pfirst.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
; pnext is overloaded per element count (b/h/s/d tests above).
236 declare <vscale x 16 x i1> @llvm.aarch64.sve.pnext.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
237 declare <vscale x 8 x i1> @llvm.aarch64.sve.pnext.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
238 declare <vscale x 4 x i1> @llvm.aarch64.sve.pnext.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
239 declare <vscale x 2 x i1> @llvm.aarch64.sve.pnext.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
; punpkhi/punpklo halve the element count, widening each predicate element.
241 declare <vscale x 8 x i1> @llvm.aarch64.sve.punpkhi.nxv8i1(<vscale x 16 x i1>)
242 declare <vscale x 4 x i1> @llvm.aarch64.sve.punpkhi.nxv4i1(<vscale x 8 x i1>)
243 declare <vscale x 2 x i1> @llvm.aarch64.sve.punpkhi.nxv2i1(<vscale x 4 x i1>)
245 declare <vscale x 8 x i1> @llvm.aarch64.sve.punpklo.nxv8i1(<vscale x 16 x i1>)
246 declare <vscale x 4 x i1> @llvm.aarch64.sve.punpklo.nxv4i1(<vscale x 8 x i1>)
247 declare <vscale x 2 x i1> @llvm.aarch64.sve.punpklo.nxv2i1(<vscale x 4 x i1>)