; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s
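
; This file covers the SVE2 predicated pairwise arithmetic intrinsics
; (addp, faddp, fmaxp, fmaxnmp, fminp, fminnmp, smaxp, sminp, umaxp, uminp).
; Each function checks that the intrinsic lowers to the corresponding
; predicated pairwise instruction both with +sve2 and in streaming mode
; (+sme with -force-streaming).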

;
; ADDP
;

define <vscale x 16 x i8> @addp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: addp_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @addp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: addp_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @addp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: addp_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @addp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: addp_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    addp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; FADDP
;

define <vscale x 8 x half> @faddp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: faddp_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    faddp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @faddp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: faddp_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    faddp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @faddp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: faddp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    faddp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXP
;

define <vscale x 8 x half> @fmaxp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxp_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxp_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNMP
;

define <vscale x 8 x half> @fmaxnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnmp_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnmp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
                                                                    <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnmp_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnmp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
                                                                     <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnmp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnmp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
                                                                      <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINP
;

define <vscale x 8 x half> @fminp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminp_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminp_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNMP
;

define <vscale x 8 x half> @fminnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnmp_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnmp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x half> %a,
                                                                    <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnmp_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnmp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1> %pg,
                                                                     <vscale x 4 x float> %a,
                                                                     <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnmp_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnmp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1> %pg,
                                                                      <vscale x 2 x double> %a,
                                                                      <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; SMAXP
;

define <vscale x 16 x i8> @smaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smaxp_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smaxp_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smaxp_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smaxp_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; SMINP
;

define <vscale x 16 x i8> @sminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sminp_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sminp_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sminp_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sminp_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UMINP
;

define <vscale x 16 x i8> @uminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uminp_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uminp_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uminp_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uminp_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; UMAXP
;

define <vscale x 16 x i8> @umaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umaxp_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxp z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1> %pg,
                                                                 <vscale x 16 x i8> %a,
                                                                 <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umaxp_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxp z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x i16> %a,
                                                                 <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umaxp_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxp z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umaxp_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxp z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)