; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+sve < %s | FileCheck %s -check-prefixes=CHECK,SVE1
; RUN: llc -mattr=+sve2 < %s | FileCheck %s -check-prefixes=CHECK,SVE2

target triple = "aarch64-unknown-linux-gnu"
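
;
; ADD
;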

define <vscale x 16 x i8> @add_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: add_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: add_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: add_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
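
;
; ADD (immediate)
;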

define <vscale x 16 x i8> @add_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: add_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.b, z0.b, #3 // =0x3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.add.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @add_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: add_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.h, z0.h, #4 // =0x4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @add_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: add_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.s, z0.s, #5 // =0x5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @add_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: add_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add z0.d, z0.d, #6 // =0x6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
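
;
; MLA
;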

define <vscale x 16 x i8> @mla_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: mla_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.b, p0/m, z1.b, z2.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mla.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mla_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mla_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mla.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mla_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mla_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mla.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mla_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mla_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mla.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  ret <vscale x 2 x i64> %out
}
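
;
; MLS
;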

define <vscale x 16 x i8> @mls_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: mls_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.b, p0/m, z1.b, z2.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mls.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mls_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: mls_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mls.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mls_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: mls_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mls.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mls_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: mls_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mls.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  ret <vscale x 2 x i64> %out
}
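
;
; MUL
;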

define <vscale x 16 x i8> @mul_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; SVE1-LABEL: mul_i8:
; SVE1:       // %bb.0:
; SVE1-NEXT:    mul z0.b, p0/m, z0.b, z1.b
; SVE1-NEXT:    ret
;
; SVE2-LABEL: mul_i8:
; SVE2:       // %bb.0:
; SVE2-NEXT:    mul z0.b, z0.b, z1.b
; SVE2-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mul_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; SVE1-LABEL: mul_i16:
; SVE1:       // %bb.0:
; SVE1-NEXT:    mul z0.h, p0/m, z0.h, z1.h
; SVE1-NEXT:    ret
;
; SVE2-LABEL: mul_i16:
; SVE2:       // %bb.0:
; SVE2-NEXT:    mul z0.h, z0.h, z1.h
; SVE2-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mul_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; SVE1-LABEL: mul_i32:
; SVE1:       // %bb.0:
; SVE1-NEXT:    mul z0.s, p0/m, z0.s, z1.s
; SVE1-NEXT:    ret
;
; SVE2-LABEL: mul_i32:
; SVE2:       // %bb.0:
; SVE2-NEXT:    mul z0.s, z0.s, z1.s
; SVE2-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mul_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; SVE1-LABEL: mul_i64:
; SVE1:       // %bb.0:
; SVE1-NEXT:    mul z0.d, p0/m, z0.d, z1.d
; SVE1-NEXT:    ret
;
; SVE2-LABEL: mul_i64:
; SVE2:       // %bb.0:
; SVE2-NEXT:    mul z0.d, z0.d, z1.d
; SVE2-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
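
;
; MUL (immediate)
;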

define <vscale x 16 x i8> @mul_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: mul_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @mul_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: mul_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @mul_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: mul_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @mul_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: mul_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
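
;
; SABD
;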

define <vscale x 16 x i8> @sabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sabd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sabd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    sabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sabd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sabd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
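
;
; SDIV
;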

define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdiv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdiv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
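
;
; SDIVR
;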

define <vscale x 4 x i32> @sdivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sdivr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sdivr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %out
}
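
;
; SMAX
;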

define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smax_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smax_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smax_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
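
;
; SMAX (immediate)
;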

define <vscale x 16 x i8> @smax_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: smax_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smax_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: smax_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smax_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: smax_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smax_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: smax_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
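
;
; SMIN
;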

define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: smin_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: smin_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: smin_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
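
;
; SMIN (immediate)
;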

define <vscale x 16 x i8> @smin_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: smin_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smin_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: smin_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smin_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: smin_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smin_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: smin_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
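
;
; SMULH
;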

define <vscale x 16 x i8> @smulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; SVE1-LABEL: smulh_i8:
; SVE1:       // %bb.0:
; SVE1-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
; SVE1-NEXT:    ret
;
; SVE2-LABEL: smulh_i8:
; SVE2:       // %bb.0:
; SVE2-NEXT:    smulh z0.b, z0.b, z1.b
; SVE2-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smulh.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @smulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; SVE1-LABEL: smulh_i16:
; SVE1:       // %bb.0:
; SVE1-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
; SVE1-NEXT:    ret
;
; SVE2-LABEL: smulh_i16:
; SVE2:       // %bb.0:
; SVE2-NEXT:    smulh z0.h, z0.h, z1.h
; SVE2-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smulh.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @smulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; SVE1-LABEL: smulh_i32:
; SVE1:       // %bb.0:
; SVE1-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
; SVE1-NEXT:    ret
;
; SVE2-LABEL: smulh_i32:
; SVE2:       // %bb.0:
; SVE2-NEXT:    smulh z0.s, z0.s, z1.s
; SVE2-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smulh.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @smulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; SVE1-LABEL: smulh_i64:
; SVE1:       // %bb.0:
; SVE1-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
; SVE1-NEXT:    ret
;
; SVE2-LABEL: smulh_i64:
; SVE2:       // %bb.0:
; SVE2-NEXT:    smulh z0.d, z0.d, z1.d
; SVE2-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smulh.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
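
;
; SUB
;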

define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
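
;
; SUB (immediate)
;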

define <vscale x 16 x i8> @sub_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: sub_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.b, z0.b, #3 // =0x3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sub_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: sub_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.h, z0.h, #4 // =0x4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sub_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: sub_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.s, z0.s, #5 // =0x5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sub_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: sub_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.d, z0.d, #6 // =0x6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
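
;
; SUBR
;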

define <vscale x 16 x i8> @subr_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: subr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.b, z1.b, z0.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: subr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.h, z1.h, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: subr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.s, z1.s, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: subr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub z0.d, z1.d, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %out
}
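
;
; SUBR (immediate)
;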

define <vscale x 16 x i8> @subr_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: subr_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subr z0.b, z0.b, #3 // =0x3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %imm.splat, <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @subr_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: subr_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subr z0.h, z0.h, #4 // =0x4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %imm.splat, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @subr_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: subr_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subr z0.s, z0.s, #5 // =0x5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %imm.splat, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @subr_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: subr_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    subr z0.d, z0.d, #6 // =0x6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %imm.splat, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %out
}
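
;
; UABD
;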

define <vscale x 16 x i8> @uabd_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uabd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    uabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uabd_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uabd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    uabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uabd_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uabd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uabd_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uabd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
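
;
; UDIV
;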

define <vscale x 4 x i32> @udiv_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udiv_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udiv_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
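
;
; UDIVR
;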

define <vscale x 4 x i32> @udivr_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: udivr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udiv.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udivr_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: udivr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udiv.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %out
}
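
;
; UMAX
;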

define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umax_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umax_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umax_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
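
;
; UMAX (immediate)
;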

define <vscale x 16 x i8> @umax_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: umax_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umax_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: umax_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umax_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: umax_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umax_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: umax_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
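
;
; UMIN
;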

define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: umin_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: umin_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: umin_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
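
;
; UMIN (immediate)
;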

define <vscale x 16 x i8> @umin_imm_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: umin_imm_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.b, z0.b, #3
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 16 x i8> undef, i8 3, i32 0
  %imm.splat = shufflevector <vscale x 16 x i8> %imm, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %imm.splat)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umin_imm_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: umin_imm_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.h, z0.h, #4
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 8 x i16> undef, i16 4, i32 0
  %imm.splat = shufflevector <vscale x 8 x i16> %imm, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %imm.splat)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umin_imm_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: umin_imm_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.s, z0.s, #5
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 4 x i32> undef, i32 5, i32 0
  %imm.splat = shufflevector <vscale x 4 x i32> %imm, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %imm.splat)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umin_imm_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: umin_imm_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin z0.d, z0.d, #6
; CHECK-NEXT:    ret
  %imm = insertelement <vscale x 2 x i64> undef, i64 6, i32 0
  %imm.splat = shufflevector <vscale x 2 x i64> %imm, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %imm.splat)
  ret <vscale x 2 x i64> %out
}
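
;
; UMULH
;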

define <vscale x 16 x i8> @umulh_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; SVE1-LABEL: umulh_i8:
; SVE1:       // %bb.0:
; SVE1-NEXT:    umulh z0.b, p0/m, z0.b, z1.b
; SVE1-NEXT:    ret
;
; SVE2-LABEL: umulh_i8:
; SVE2:       // %bb.0:
; SVE2-NEXT:    umulh z0.b, z0.b, z1.b
; SVE2-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umulh.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @umulh_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; SVE1-LABEL: umulh_i16:
; SVE1:       // %bb.0:
; SVE1-NEXT:    umulh z0.h, p0/m, z0.h, z1.h
; SVE1-NEXT:    ret
;
; SVE2-LABEL: umulh_i16:
; SVE2:       // %bb.0:
; SVE2-NEXT:    umulh z0.h, z0.h, z1.h
; SVE2-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umulh.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @umulh_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; SVE1-LABEL: umulh_i32:
; SVE1:       // %bb.0:
; SVE1-NEXT:    umulh z0.s, p0/m, z0.s, z1.s
; SVE1-NEXT:    ret
;
; SVE2-LABEL: umulh_i32:
; SVE2:       // %bb.0:
; SVE2-NEXT:    umulh z0.s, z0.s, z1.s
; SVE2-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umulh.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @umulh_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; SVE1-LABEL: umulh_i64:
; SVE1:       // %bb.0:
; SVE1-NEXT:    umulh z0.d, p0/m, z0.d, z1.d
; SVE1-NEXT:    ret
;
; SVE2-LABEL: umulh_i64:
; SVE2:       // %bb.0:
; SVE2-NEXT:    umulh z0.d, z0.d, z1.d
; SVE2-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umulh.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.add.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.add.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.add.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.add.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mla.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mla.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mla.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mla.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mls.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mls.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mls.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mls.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.mul.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.mul.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.mul.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.mul.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.sdiv.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sdiv.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.smulh.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.smulh.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.smulh.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.smulh.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sub.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sub.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sub.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sub.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uabd.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uabd.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uabd.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.udiv.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.udiv.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.umulh.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.umulh.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.umulh.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.umulh.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)