; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
; SVE2 ADDP: predicated pairwise integer add, 16 x i8 elements.
define <vscale x 16 x i8> @addp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: addp_i8:
; CHECK: addp z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
; SVE2 ADDP: predicated pairwise integer add, 8 x i16 elements.
define <vscale x 8 x i16> @addp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: addp_i16:
; CHECK: addp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}
; SVE2 ADDP: predicated pairwise integer add, 4 x i32 elements.
define <vscale x 4 x i32> @addp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: addp_i32:
; CHECK: addp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}
; SVE2 ADDP: predicated pairwise integer add, 2 x i64 elements.
define <vscale x 2 x i64> @addp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: addp_i64:
; CHECK: addp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
; SVE2 FADDP: predicated pairwise FP add, 8 x half elements.
define <vscale x 8 x half> @faddp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: faddp_f16:
; CHECK: faddp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}
; SVE2 FADDP: predicated pairwise FP add, 4 x float elements.
define <vscale x 4 x float> @faddp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: faddp_f32:
; CHECK: faddp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}
; SVE2 FADDP: predicated pairwise FP add, 2 x double elements.
define <vscale x 2 x double> @faddp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: faddp_f64:
; CHECK: faddp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
; SVE2 FMAXP: predicated pairwise FP maximum, 8 x half elements.
define <vscale x 8 x half> @fmaxp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxp_f16:
; CHECK: fmaxp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}
; SVE2 FMAXP: predicated pairwise FP maximum, 4 x float elements.
define <vscale x 4 x float> @fmaxp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxp_f32:
; CHECK: fmaxp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}
; SVE2 FMAXP: predicated pairwise FP maximum, 2 x double elements.
define <vscale x 2 x double> @fmaxp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxp_f64:
; CHECK: fmaxp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
; SVE2 FMAXNMP: predicated pairwise FP maximum-number (NaN-propagation-suppressing), 8 x half elements.
define <vscale x 8 x half> @fmaxnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnmp_f16:
; CHECK: fmaxnmp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}
; SVE2 FMAXNMP: predicated pairwise FP maximum-number, 4 x float elements.
define <vscale x 4 x float> @fmaxnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnmp_f32:
; CHECK: fmaxnmp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}
; SVE2 FMAXNMP: predicated pairwise FP maximum-number, 2 x double elements.
define <vscale x 2 x double> @fmaxnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnmp_f64:
; CHECK: fmaxnmp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
; SVE2 FMINP: predicated pairwise FP minimum, 8 x half elements.
define <vscale x 8 x half> @fminp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminp_f16:
; CHECK: fminp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}
; SVE2 FMINP: predicated pairwise FP minimum, 4 x float elements.
define <vscale x 4 x float> @fminp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminp_f32:
; CHECK: fminp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}
; SVE2 FMINP: predicated pairwise FP minimum, 2 x double elements.
define <vscale x 2 x double> @fminp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminp_f64:
; CHECK: fminp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
; SVE2 FMINNMP: predicated pairwise FP minimum-number, 8 x half elements.
define <vscale x 8 x half> @fminnmp_f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnmp_f16:
; CHECK: fminnmp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}
; SVE2 FMINNMP: predicated pairwise FP minimum-number, 4 x float elements.
define <vscale x 4 x float> @fminnmp_f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnmp_f32:
; CHECK: fminnmp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}
; SVE2 FMINNMP: predicated pairwise FP minimum-number, 2 x double elements.
define <vscale x 2 x double> @fminnmp_f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnmp_f64:
; CHECK: fminnmp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
; SVE2 SMAXP: predicated pairwise signed maximum, 16 x i8 elements.
define <vscale x 16 x i8> @smaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smaxp_i8:
; CHECK: smaxp z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
; SVE2 SMAXP: predicated pairwise signed maximum, 8 x i16 elements.
define <vscale x 8 x i16> @smaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smaxp_i16:
; CHECK: smaxp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}
; SVE2 SMAXP: predicated pairwise signed maximum, 4 x i32 elements.
define <vscale x 4 x i32> @smaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smaxp_i32:
; CHECK: smaxp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}
; SVE2 SMAXP: predicated pairwise signed maximum, 2 x i64 elements.
define <vscale x 2 x i64> @smaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smaxp_i64:
; CHECK: smaxp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
; SVE2 SMINP: predicated pairwise signed minimum, 16 x i8 elements.
define <vscale x 16 x i8> @sminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sminp_i8:
; CHECK: sminp z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
; SVE2 SMINP: predicated pairwise signed minimum, 8 x i16 elements.
define <vscale x 8 x i16> @sminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sminp_i16:
; CHECK: sminp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}
; SVE2 SMINP: predicated pairwise signed minimum, 4 x i32 elements.
define <vscale x 4 x i32> @sminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sminp_i32:
; CHECK: sminp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}
; SVE2 SMINP: predicated pairwise signed minimum, 2 x i64 elements.
define <vscale x 2 x i64> @sminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sminp_i64:
; CHECK: sminp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
; SVE2 UMINP: predicated pairwise unsigned minimum, 16 x i8 elements.
define <vscale x 16 x i8> @uminp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uminp_i8:
; CHECK: uminp z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
; SVE2 UMINP: predicated pairwise unsigned minimum, 8 x i16 elements.
define <vscale x 8 x i16> @uminp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uminp_i16:
; CHECK: uminp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}
; SVE2 UMINP: predicated pairwise unsigned minimum, 4 x i32 elements.
define <vscale x 4 x i32> @uminp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uminp_i32:
; CHECK: uminp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}
; SVE2 UMINP: predicated pairwise unsigned minimum, 2 x i64 elements.
define <vscale x 2 x i64> @uminp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uminp_i64:
; CHECK: uminp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
; SVE2 UMAXP: predicated pairwise unsigned maximum, 16 x i8 elements.
define <vscale x 16 x i8> @umaxp_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umaxp_i8:
; CHECK: umaxp z0.b, p0/m, z0.b, z1.b
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}
; SVE2 UMAXP: predicated pairwise unsigned maximum, 8 x i16 elements.
define <vscale x 8 x i16> @umaxp_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umaxp_i16:
; CHECK: umaxp z0.h, p0/m, z0.h, z1.h
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}
; SVE2 UMAXP: predicated pairwise unsigned maximum, 4 x i32 elements.
define <vscale x 4 x i32> @umaxp_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umaxp_i32:
; CHECK: umaxp z0.s, p0/m, z0.s, z1.s
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}
; SVE2 UMAXP: predicated pairwise unsigned maximum, 2 x i64 elements.
define <vscale x 2 x i64> @umaxp_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umaxp_i64:
; CHECK: umaxp z0.d, p0/m, z0.d, z1.d
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
; Intrinsic declarations for the SVE2 pairwise operations exercised above.
declare <vscale x 16 x i8> @llvm.aarch64.sve.addp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.addp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.addp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.addp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 8 x half> @llvm.aarch64.sve.faddp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.faddp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.faddp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnmp.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnmp.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnmp.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umaxp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umaxp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umaxp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umaxp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uminp.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uminp.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uminp.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uminp.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)