; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
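
; Check that the SVE predicated integer arithmetic intrinsics lower to the
; corresponding merging (p0/m) instructions.

;
; ADD
;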
define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8:
; CHECK: add z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16:
; CHECK: add z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK: add z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64:
; CHECK: add z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
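
;
; SUB
;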
define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8:
; CHECK: sub z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16:
; CHECK: sub z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32:
; CHECK: sub z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64:
; CHECK: sub z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
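
;
; SUBR
;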
define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subr_i8:
; CHECK: subr z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subr_i16:
; CHECK: subr z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subr_i32:
; CHECK: subr z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subr_i64:
; CHECK: subr z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
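
;
; SMAX
;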
define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smax_i8:
; CHECK: smax z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smax_i16:
; CHECK: smax z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK: smax z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smax_i64:
; CHECK: smax z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
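
;
; UMAX
;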
define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umax_i8:
; CHECK: umax z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umax_i16:
; CHECK: umax z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK: umax z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umax_i64:
; CHECK: umax z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
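
;
; SMIN
;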
define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smin_i8:
; CHECK: smin z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smin_i16:
; CHECK: smin z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK: smin z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smin_i64:
; CHECK: smin z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
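
;
; UMIN
;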
define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umin_i8:
; CHECK: umin z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umin_i16:
; CHECK: umin z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK: umin z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umin_i64:
; CHECK: umin z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
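
;
; SABD
;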
define <vscale x 16 x i8> @sabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sabd_i8:
; CHECK: sabd z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sabd_i16:
; CHECK: sabd z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sabd_i32:
; CHECK: sabd z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sabd_i64:
; CHECK: sabd z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
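
;
; UABD
;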
define <vscale x 16 x i8> @uabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uabd_i8:
; CHECK: uabd z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uabd_i16:
; CHECK: uabd z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uabd_i32:
; CHECK: uabd z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uabd_i64:
; CHECK: uabd z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
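
; Declarations of the SVE intrinsics exercised above.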
declare <vscale x 16 x i8> @llvm.aarch64.sve.add.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.add.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.add.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.subr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.subr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.subr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.subr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uabd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uabd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uabd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uabd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)