; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
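
; The tests below check that saturating add/sub intrinsics with an all-active
; predicate (ptrue pattern 31, i.e. SV_ALL) and a splat-immediate operand are
; lowered to the unpredicated immediate forms of the instructions.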

;
; SQADD
;

define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqadd_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 27))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 43))
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqadd_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 2048))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 1))
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqadd_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 8192))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 255))
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqadd_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 65280))
  ret <vscale x 2 x i64> %out
}

;
; SQSUB
;

define <vscale x 16 x i8> @sqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: sqsub_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> splat(i8 27))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> splat(i16 43))
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @sqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqsub_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> splat(i16 2048))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> splat(i32 1))
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @sqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqsub_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> splat(i32 8192))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> splat(i64 255))
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @sqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqsub_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> splat(i64 65280))
  ret <vscale x 2 x i64> %out
}

;
; UQADD
;

define <vscale x 16 x i8> @uqadd_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqadd_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> splat(i8 27))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqadd_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 43))
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqadd_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqadd_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> splat(i16 2048))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqadd_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 1))
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @uqadd_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqadd_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> splat(i32 8192))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqadd_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 255))
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqadd_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqadd_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> splat(i64 65280))
  ret <vscale x 2 x i64> %out
}

;
; UQSUB
;

define <vscale x 16 x i8> @uqsub_b_lowimm(<vscale x 16 x i8> %a) {
; CHECK-LABEL: uqsub_b_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.b, z0.b, #27 // =0x1b
; CHECK-NEXT:    ret
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.u.nxv16i8(<vscale x 16 x i1> %pg,
                                                                   <vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> splat(i8 27))
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqsub_h_lowimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, #43 // =0x2b
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> splat(i16 43))
  ret <vscale x 8 x i16> %out
}

define <vscale x 8 x i16> @uqsub_h_highimm(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqsub_h_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, #2048 // =0x800
; CHECK-NEXT:    ret
  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.u.nxv8i16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> splat(i16 2048))
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqsub_s_lowimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> splat(i32 1))
  ret <vscale x 4 x i32> %out
}

define <vscale x 4 x i32> @uqsub_s_highimm(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqsub_s_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #8192 // =0x2000
; CHECK-NEXT:    ret
  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> splat(i32 8192))
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqsub_d_lowimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_lowimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, #255 // =0xff
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> splat(i64 255))
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @uqsub_d_highimm(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqsub_d_highimm:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, #65280 // =0xff00
; CHECK-NEXT:    ret
  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.u.nxv2i64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> splat(i64 65280))
  ret <vscale x 2 x i64> %out
}

; As uqsub_i32 but where pg is i8 based and thus compatible with i32.
define <vscale x 4 x i32> @uqsub_i32_ptrue_all_b(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: uqsub_i32_ptrue_all_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                        <vscale x 4 x i32> %a,
                                                                        <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

; As uqsub_i32 but where pg is i16 based and thus compatible with i32.
define <vscale x 4 x i32> @uqsub_i32_ptrue_all_h(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: uqsub_i32_ptrue_all_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, #1 // =0x1
; CHECK-NEXT:    ret
  %pg.h = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %pg.h)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                        <vscale x 4 x i32> %a,
                                                                        <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

; As uqsub_i32 but where pg is i64 based, which is not compatible with i32;
; inactive lanes are therefore significant and the immediate form cannot be
; used.
define <vscale x 4 x i32> @uqsub_i32_ptrue_all_d(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: uqsub_i32_ptrue_all_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z1.s, #1 // =0x1
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uqsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %pg.d = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.b = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg.d)
  %pg.s = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.b)
  %b = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
  %out = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> %pg.s,
                                                                      <vscale x 4 x i32> %a,
                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)

declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 %pattern)
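
; Attribute group referenced by the uqsub_i32_ptrue_all_* tests above; the
; feature string is assumed to match the RUN line.
attributes #0 = { "target-features"="+sve2" }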