; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64 -mattr=+sve2 -verify-machineinstrs %s -o - | FileCheck %s

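; The tests below select between the result of a binary operation and one of its
; operands: select(icmp sgt %n, 0, binop(%x, %y), %x). The expectation is that the
; compare becomes the governing predicate of a single predicated SVE instruction
; wherever such an instruction exists. The *_x variants keep %x in the false
; lanes; the *_y variants towards the end of the file keep %y instead.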
define <vscale x 2 x i64> @add_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: add_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    add z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = add <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @add_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: add_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    add z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = add <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @add_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: add_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    add z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = add <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @add_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: add_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = add <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @sub_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: sub_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    sub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = sub <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @sub_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: sub_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    sub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = sub <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @sub_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: sub_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    sub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = sub <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @sub_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: sub_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    sub z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = sub <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @mul_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: mul_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = mul <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @mul_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: mul_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = mul <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @mul_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: mul_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = mul <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @mul_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: mul_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    mul z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = mul <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

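; SVE has no 16-bit or 8-bit element divide, so the i16/i8 sdiv and udiv tests
; below are widened with (s|u)unpklo/(s|u)unpkhi, divided as .s elements, and
; repacked with uzp1 before the predicate from the compare is applied.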
define <vscale x 2 x i64> @sdiv_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: sdiv_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sdivr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = sdiv <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @sdiv_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: sdiv_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sdivr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    mov z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = sdiv <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @sdiv_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: sdiv_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sunpkhi z3.s, z1.h
; CHECK-NEXT:    sunpkhi z4.s, z0.h
; CHECK-NEXT:    sunpklo z1.s, z1.h
; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    sunpklo z4.s, z0.h
; CHECK-NEXT:    sdivr z1.s, p0/m, z1.s, z4.s
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    uzp1 z1.h, z1.h, z3.h
; CHECK-NEXT:    mov z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = sdiv <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @sdiv_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: sdiv_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sunpkhi z3.h, z1.b
; CHECK-NEXT:    sunpkhi z4.h, z0.b
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sunpklo z1.h, z1.b
; CHECK-NEXT:    sunpkhi z5.s, z3.h
; CHECK-NEXT:    sunpkhi z6.s, z4.h
; CHECK-NEXT:    sunpklo z3.s, z3.h
; CHECK-NEXT:    sunpklo z4.s, z4.h
; CHECK-NEXT:    sdivr z5.s, p0/m, z5.s, z6.s
; CHECK-NEXT:    sunpkhi z6.s, z1.h
; CHECK-NEXT:    sunpklo z1.s, z1.h
; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    sunpklo z4.h, z0.b
; CHECK-NEXT:    sunpkhi z7.s, z4.h
; CHECK-NEXT:    sunpklo z4.s, z4.h
; CHECK-NEXT:    sdivr z6.s, p0/m, z6.s, z7.s
; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
; CHECK-NEXT:    sdivr z1.s, p0/m, z1.s, z4.s
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    uzp1 z1.h, z1.h, z6.h
; CHECK-NEXT:    uzp1 z1.b, z1.b, z3.b
; CHECK-NEXT:    mov z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = sdiv <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @udiv_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: udiv_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    udivr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = udiv <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @udiv_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: udiv_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    udivr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    mov z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = udiv <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @udiv_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: udiv_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uunpkhi z3.s, z1.h
; CHECK-NEXT:    uunpkhi z4.s, z0.h
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    uunpklo z4.s, z0.h
; CHECK-NEXT:    udivr z1.s, p0/m, z1.s, z4.s
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    uzp1 z1.h, z1.h, z3.h
; CHECK-NEXT:    mov z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = udiv <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @udiv_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: udiv_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uunpkhi z3.h, z1.b
; CHECK-NEXT:    uunpkhi z4.h, z0.b
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uunpklo z1.h, z1.b
; CHECK-NEXT:    uunpkhi z5.s, z3.h
; CHECK-NEXT:    uunpkhi z6.s, z4.h
; CHECK-NEXT:    uunpklo z3.s, z3.h
; CHECK-NEXT:    uunpklo z4.s, z4.h
; CHECK-NEXT:    udivr z5.s, p0/m, z5.s, z6.s
; CHECK-NEXT:    uunpkhi z6.s, z1.h
; CHECK-NEXT:    uunpklo z1.s, z1.h
; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    uunpklo z4.h, z0.b
; CHECK-NEXT:    uunpkhi z7.s, z4.h
; CHECK-NEXT:    uunpklo z4.s, z4.h
; CHECK-NEXT:    udivr z6.s, p0/m, z6.s, z7.s
; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
; CHECK-NEXT:    udivr z1.s, p0/m, z1.s, z4.s
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    uzp1 z1.h, z1.h, z6.h
; CHECK-NEXT:    uzp1 z1.b, z1.b, z3.b
; CHECK-NEXT:    mov z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = udiv <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

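; There is no SVE integer remainder instruction: srem and urem are expanded to a
; divide followed by a multiply-subtract, and the compare's predicate is folded
; into the final predicated mls.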
define <vscale x 2 x i64> @srem_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: srem_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z3, z0
; CHECK-NEXT:    sdiv z3.d, p0/m, z3.d, z1.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    mls z0.d, p0/m, z3.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = srem <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @srem_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: srem_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z3, z0
; CHECK-NEXT:    sdiv z3.s, p0/m, z3.s, z1.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    mls z0.s, p0/m, z3.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = srem <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @srem_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: srem_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sunpkhi z3.s, z1.h
; CHECK-NEXT:    sunpkhi z4.s, z0.h
; CHECK-NEXT:    sunpklo z5.s, z0.h
; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    sunpklo z4.s, z1.h
; CHECK-NEXT:    sdivr z4.s, p0/m, z4.s, z5.s
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    uzp1 z3.h, z4.h, z3.h
; CHECK-NEXT:    mls z0.h, p0/m, z3.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = srem <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @srem_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: srem_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sunpkhi z3.h, z1.b
; CHECK-NEXT:    sunpkhi z4.h, z0.b
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sunpkhi z5.s, z3.h
; CHECK-NEXT:    sunpkhi z6.s, z4.h
; CHECK-NEXT:    sunpklo z3.s, z3.h
; CHECK-NEXT:    sunpklo z4.s, z4.h
; CHECK-NEXT:    sdivr z5.s, p0/m, z5.s, z6.s
; CHECK-NEXT:    sunpklo z6.h, z0.b
; CHECK-NEXT:    sunpkhi z24.s, z6.h
; CHECK-NEXT:    sunpklo z6.s, z6.h
; CHECK-NEXT:    sdivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    sunpklo z4.h, z1.b
; CHECK-NEXT:    sunpkhi z7.s, z4.h
; CHECK-NEXT:    sunpklo z4.s, z4.h
; CHECK-NEXT:    sdivr z7.s, p0/m, z7.s, z24.s
; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
; CHECK-NEXT:    sdivr z4.s, p0/m, z4.s, z6.s
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    uzp1 z4.h, z4.h, z7.h
; CHECK-NEXT:    uzp1 z3.b, z4.b, z3.b
; CHECK-NEXT:    mls z0.b, p0/m, z3.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = srem <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @urem_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: urem_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z3, z0
; CHECK-NEXT:    udiv z3.d, p0/m, z3.d, z1.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    mls z0.d, p0/m, z3.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = urem <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @urem_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: urem_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z3, z0
; CHECK-NEXT:    udiv z3.s, p0/m, z3.s, z1.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    mls z0.s, p0/m, z3.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = urem <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @urem_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: urem_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uunpkhi z3.s, z1.h
; CHECK-NEXT:    uunpkhi z4.s, z0.h
; CHECK-NEXT:    uunpklo z5.s, z0.h
; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    uunpklo z4.s, z1.h
; CHECK-NEXT:    udivr z4.s, p0/m, z4.s, z5.s
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    uzp1 z3.h, z4.h, z3.h
; CHECK-NEXT:    mls z0.h, p0/m, z3.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = urem <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @urem_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: urem_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uunpkhi z3.h, z1.b
; CHECK-NEXT:    uunpkhi z4.h, z0.b
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uunpkhi z5.s, z3.h
; CHECK-NEXT:    uunpkhi z6.s, z4.h
; CHECK-NEXT:    uunpklo z3.s, z3.h
; CHECK-NEXT:    uunpklo z4.s, z4.h
; CHECK-NEXT:    udivr z5.s, p0/m, z5.s, z6.s
; CHECK-NEXT:    uunpklo z6.h, z0.b
; CHECK-NEXT:    uunpkhi z24.s, z6.h
; CHECK-NEXT:    uunpklo z6.s, z6.h
; CHECK-NEXT:    udivr z3.s, p0/m, z3.s, z4.s
; CHECK-NEXT:    uunpklo z4.h, z1.b
; CHECK-NEXT:    uunpkhi z7.s, z4.h
; CHECK-NEXT:    uunpklo z4.s, z4.h
; CHECK-NEXT:    udivr z7.s, p0/m, z7.s, z24.s
; CHECK-NEXT:    uzp1 z3.h, z3.h, z5.h
; CHECK-NEXT:    udivr z4.s, p0/m, z4.s, z6.s
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    uzp1 z4.h, z4.h, z7.h
; CHECK-NEXT:    uzp1 z3.b, z4.b, z3.b
; CHECK-NEXT:    mls z0.b, p0/m, z3.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = urem <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @and_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: and_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    and z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = and <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @and_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: and_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    and z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = and <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @and_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: and_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    and z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = and <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @and_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: and_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    and z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = and <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @or_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: or_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    orr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = or <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @or_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: or_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    orr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = or <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @or_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: or_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    orr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = or <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @or_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: or_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    orr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = or <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @xor_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: xor_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    eor z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = xor <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @xor_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: xor_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    eor z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = xor <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @xor_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: xor_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    eor z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = xor <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @xor_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: xor_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    eor z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = xor <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

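; For the shifts the compare result is kept in p1 rather than being folded into
; the shift itself: the shift executes under the all-true predicate (using the
; reversed lslr/asrr/lsrr forms) and the result is then merged into z0 with a
; predicated mov.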
define <vscale x 2 x i64> @shl_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: shl_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p1.d, p0/z, z2.d, #0
; CHECK-NEXT:    lslr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p1/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = shl <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @shl_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: shl_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, #0
; CHECK-NEXT:    lslr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    mov z0.s, p1/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = shl <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @shl_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: shl_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p1.h, p0/z, z2.h, #0
; CHECK-NEXT:    lslr z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT:    mov z0.h, p1/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = shl <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @shl_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: shl_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p1.b, p0/z, z2.b, #0
; CHECK-NEXT:    lslr z1.b, p0/m, z1.b, z0.b
; CHECK-NEXT:    mov z0.b, p1/m, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = shl <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @ashr_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: ashr_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p1.d, p0/z, z2.d, #0
; CHECK-NEXT:    asrr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p1/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = ashr <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @ashr_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: ashr_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, #0
; CHECK-NEXT:    asrr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    mov z0.s, p1/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = ashr <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @ashr_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: ashr_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p1.h, p0/z, z2.h, #0
; CHECK-NEXT:    asrr z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT:    mov z0.h, p1/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = ashr <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @ashr_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: ashr_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p1.b, p0/z, z2.b, #0
; CHECK-NEXT:    asrr z1.b, p0/m, z1.b, z0.b
; CHECK-NEXT:    mov z0.b, p1/m, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = ashr <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @lshr_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: lshr_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p1.d, p0/z, z2.d, #0
; CHECK-NEXT:    lsrr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, p1/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = lshr <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @lshr_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: lshr_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p1.s, p0/z, z2.s, #0
; CHECK-NEXT:    lsrr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    mov z0.s, p1/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = lshr <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @lshr_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: lshr_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p1.h, p0/z, z2.h, #0
; CHECK-NEXT:    lsrr z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT:    mov z0.h, p1/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = lshr <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @lshr_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: lshr_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p1.b, p0/z, z2.b, #0
; CHECK-NEXT:    lsrr z1.b, p0/m, z1.b, z0.b
; CHECK-NEXT:    mov z0.b, p1/m, z1.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = lshr <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

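; The mla/mls tests check that the mul + add/sub pair and the select combine into
; a single predicated multiply-accumulate (mla) or multiply-subtract (msb).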
define <vscale x 2 x i64> @mla_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
; CHECK-LABEL: mla_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
; CHECK-NEXT:    mla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %m = mul <vscale x 2 x i64> %y, %z
  %a = add <vscale x 2 x i64> %x, %m
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @mla_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
; CHECK-LABEL: mla_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
; CHECK-NEXT:    mla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %m = mul <vscale x 4 x i32> %y, %z
  %a = add <vscale x 4 x i32> %x, %m
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @mla_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
; CHECK-LABEL: mla_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
; CHECK-NEXT:    mla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %m = mul <vscale x 8 x i16> %y, %z
  %a = add <vscale x 8 x i16> %x, %m
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @mla_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
; CHECK-LABEL: mla_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
; CHECK-NEXT:    mla z0.b, p0/m, z1.b, z2.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %m = mul <vscale x 16 x i8> %y, %z
  %a = add <vscale x 16 x i8> %x, %m
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

define <vscale x 2 x i64> @mls_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
; CHECK-LABEL: mls_nxv2i64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z3.d, #0
; CHECK-NEXT:    msb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %m = mul <vscale x 2 x i64> %x, %y
  %a = sub <vscale x 2 x i64> %z, %m
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @mls_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
; CHECK-LABEL: mls_nxv4i32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z3.s, #0
; CHECK-NEXT:    msb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %m = mul <vscale x 4 x i32> %x, %y
  %a = sub <vscale x 4 x i32> %z, %m
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @mls_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
; CHECK-LABEL: mls_nxv8i16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z3.h, #0
; CHECK-NEXT:    msb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %m = mul <vscale x 8 x i16> %x, %y
  %a = sub <vscale x 8 x i16> %z, %m
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @mls_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
; CHECK-LABEL: mls_nxv16i8_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z3.b, #0
; CHECK-NEXT:    msb z0.b, p0/m, z1.b, z2.b
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %m = mul <vscale x 16 x i8> %x, %y
  %a = sub <vscale x 16 x i8> %z, %m
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
  ret <vscale x 16 x i8> %b
}

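; The floating-point tests use an unordered compare, fcmp ugt %n, zero, which is
; lowered as the inverted ordered compare: fcmle followed by a predicate not.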
define <vscale x 4 x float> @fadd_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fadd_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = fadd <vscale x 4 x float> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fadd_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fadd_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = fadd <vscale x 8 x half> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fadd_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fadd_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = fadd <vscale x 2 x double> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @fsub_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fsub_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = fsub <vscale x 4 x float> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fsub_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fsub_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = fsub <vscale x 8 x half> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fsub_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fsub_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = fsub <vscale x 2 x double> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @fmul_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fmul_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = fmul <vscale x 4 x float> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fmul_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fmul_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = fmul <vscale x 8 x half> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fmul_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fmul_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = fmul <vscale x 2 x double> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @fdiv_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fdiv_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fdivr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    mov z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = fdiv <vscale x 4 x float> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fdiv_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fdiv_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    fdivr z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    mov z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = fdiv <vscale x 8 x half> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fdiv_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fdiv_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fdivr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    mov z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = fdiv <vscale x 2 x double> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @minnum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: minnum_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @minnum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: minnum_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @minnum_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: minnum_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @maxnum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: maxnum_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @maxnum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: maxnum_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @maxnum_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: maxnum_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @minimum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: minimum_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @minimum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: minimum_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @minimum_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: minimum_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @maximum_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: maximum_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @maximum_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: maximum_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @maximum_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: maximum_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

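; Both the llvm.fma intrinsic tests (fmai_*) and the fast-math fmul+fadd pairs
; (fma_*) below are expected to fold with the select into a predicated fmla.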
define <vscale x 4 x float> @fmai_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
; CHECK-LABEL: fmai_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %a = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %x)
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fmai_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
; CHECK-LABEL: fmai_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %a = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %x)
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fmai_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
; CHECK-LABEL: fmai_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %a = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %x)
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

define <vscale x 4 x float> @fma_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
; CHECK-LABEL: fma_nxv4f32_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fcmle p1.s, p0/z, z3.s, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
  %m = fmul fast <vscale x 4 x float> %y, %z
  %a = fadd fast <vscale x 4 x float> %m, %x
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %x
  ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fma_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
; CHECK-LABEL: fma_nxv8f16_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fcmle p1.h, p0/z, z3.h, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
  %m = fmul fast <vscale x 8 x half> %y, %z
  %a = fadd fast <vscale x 8 x half> %m, %x
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %x
  ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fma_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
; CHECK-LABEL: fma_nxv2f64_x:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fcmle p1.d, p0/z, z3.d, #0.0
; CHECK-NEXT:    not p0.b, p0/z, p1.b
; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
  %m = fmul fast <vscale x 2 x double> %y, %z
  %a = fadd fast <vscale x 2 x double> %m, %x
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %x
  ret <vscale x 2 x double> %b
}

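; The *_y variants keep %y rather than %x in the false lanes, so the operation is
; either made destructive onto z1 and moved back to z0, or kept unpredicated and
; combined with a sel.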
define <vscale x 2 x i64> @add_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
; CHECK-LABEL: add_nxv2i64_y:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cmpgt p0.d, p0/z, z2.d, #0
; CHECK-NEXT:    add z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
  %a = add <vscale x 2 x i64> %x, %y
  %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
  ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @add_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
; CHECK-LABEL: add_nxv4i32_y:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cmpgt p0.s, p0/z, z2.s, #0
; CHECK-NEXT:    add z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
  %a = add <vscale x 4 x i32> %x, %y
  %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
  ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @add_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
; CHECK-LABEL: add_nxv8i16_y:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cmpgt p0.h, p0/z, z2.h, #0
; CHECK-NEXT:    add z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
  %a = add <vscale x 8 x i16> %x, %y
  %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
  ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @add_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
; CHECK-LABEL: add_nxv16i8_y:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cmpgt p0.b, p0/z, z2.b, #0
; CHECK-NEXT:    add z1.b, p0/m, z1.b, z0.b
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
  %a = add <vscale x 16 x i8> %x, %y
  %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
  ret <vscale x 16 x i8> %b
}

1504 define <vscale x 2 x i64> @sub_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1505 ; CHECK-LABEL: sub_nxv2i64_y:
1506 ; CHECK: // %bb.0: // %entry
1507 ; CHECK-NEXT: ptrue p0.d
1508 ; CHECK-NEXT: sub z0.d, z0.d, z1.d
1509 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1510 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
1513 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1514 %a = sub <vscale x 2 x i64> %x, %y
1515 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1516 ret <vscale x 2 x i64> %b
1519 define <vscale x 4 x i32> @sub_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1520 ; CHECK-LABEL: sub_nxv4i32_y:
1521 ; CHECK: // %bb.0: // %entry
1522 ; CHECK-NEXT: ptrue p0.s
1523 ; CHECK-NEXT: sub z0.s, z0.s, z1.s
1524 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1525 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
1528 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1529 %a = sub <vscale x 4 x i32> %x, %y
1530 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1531 ret <vscale x 4 x i32> %b
1534 define <vscale x 8 x i16> @sub_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1535 ; CHECK-LABEL: sub_nxv8i16_y:
1536 ; CHECK: // %bb.0: // %entry
1537 ; CHECK-NEXT: ptrue p0.h
1538 ; CHECK-NEXT: sub z0.h, z0.h, z1.h
1539 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1540 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
1543 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1544 %a = sub <vscale x 8 x i16> %x, %y
1545 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1546 ret <vscale x 8 x i16> %b
1549 define <vscale x 16 x i8> @sub_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1550 ; CHECK-LABEL: sub_nxv16i8_y:
1551 ; CHECK: // %bb.0: // %entry
1552 ; CHECK-NEXT: ptrue p0.b
1553 ; CHECK-NEXT: sub z0.b, z0.b, z1.b
1554 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1555 ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b
1558 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1559 %a = sub <vscale x 16 x i8> %x, %y
1560 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1561 ret <vscale x 16 x i8> %b
1564 define <vscale x 2 x i64> @mul_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1565 ; CHECK-LABEL: mul_nxv2i64_y:
1566 ; CHECK: // %bb.0: // %entry
1567 ; CHECK-NEXT: ptrue p0.d
1568 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1569 ; CHECK-NEXT: mul z1.d, p0/m, z1.d, z0.d
1570 ; CHECK-NEXT: mov z0.d, z1.d
1573 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1574 %a = mul <vscale x 2 x i64> %x, %y
1575 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1576 ret <vscale x 2 x i64> %b
1579 define <vscale x 4 x i32> @mul_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1580 ; CHECK-LABEL: mul_nxv4i32_y:
1581 ; CHECK: // %bb.0: // %entry
1582 ; CHECK-NEXT: ptrue p0.s
1583 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1584 ; CHECK-NEXT: mul z1.s, p0/m, z1.s, z0.s
1585 ; CHECK-NEXT: mov z0.d, z1.d
1588 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1589 %a = mul <vscale x 4 x i32> %x, %y
1590 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1591 ret <vscale x 4 x i32> %b
1594 define <vscale x 8 x i16> @mul_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1595 ; CHECK-LABEL: mul_nxv8i16_y:
1596 ; CHECK: // %bb.0: // %entry
1597 ; CHECK-NEXT: ptrue p0.h
1598 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1599 ; CHECK-NEXT: mul z1.h, p0/m, z1.h, z0.h
1600 ; CHECK-NEXT: mov z0.d, z1.d
1603 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1604 %a = mul <vscale x 8 x i16> %x, %y
1605 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1606 ret <vscale x 8 x i16> %b
1609 define <vscale x 16 x i8> @mul_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1610 ; CHECK-LABEL: mul_nxv16i8_y:
1611 ; CHECK: // %bb.0: // %entry
1612 ; CHECK-NEXT: ptrue p0.b
1613 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1614 ; CHECK-NEXT: mul z1.b, p0/m, z1.b, z0.b
1615 ; CHECK-NEXT: mov z0.d, z1.d
1618 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1619 %a = mul <vscale x 16 x i8> %x, %y
1620 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1621 ret <vscale x 16 x i8> %b
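; SVE only provides 32-bit and 64-bit integer divides, so in the nxv8i16 and
; nxv16i8 sdiv/udiv tests below the inputs are widened with (s|u)unpklo/(s|u)unpkhi,
; divided as .s vectors and narrowed again with uzp1 before the final select.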
1624 define <vscale x 2 x i64> @sdiv_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1625 ; CHECK-LABEL: sdiv_nxv2i64_y:
1626 ; CHECK: // %bb.0: // %entry
1627 ; CHECK-NEXT: ptrue p0.d
1628 ; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d
1629 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1630 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
1633 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1634 %a = sdiv <vscale x 2 x i64> %x, %y
1635 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1636 ret <vscale x 2 x i64> %b
1639 define <vscale x 4 x i32> @sdiv_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1640 ; CHECK-LABEL: sdiv_nxv4i32_y:
1641 ; CHECK: // %bb.0: // %entry
1642 ; CHECK-NEXT: ptrue p0.s
1643 ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z1.s
1644 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1645 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
1648 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1649 %a = sdiv <vscale x 4 x i32> %x, %y
1650 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1651 ret <vscale x 4 x i32> %b
1654 define <vscale x 8 x i16> @sdiv_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1655 ; CHECK-LABEL: sdiv_nxv8i16_y:
1656 ; CHECK: // %bb.0: // %entry
1657 ; CHECK-NEXT: ptrue p0.s
1658 ; CHECK-NEXT: sunpkhi z3.s, z1.h
1659 ; CHECK-NEXT: sunpkhi z4.s, z0.h
1660 ; CHECK-NEXT: sunpklo z0.s, z0.h
1661 ; CHECK-NEXT: sdivr z3.s, p0/m, z3.s, z4.s
1662 ; CHECK-NEXT: sunpklo z4.s, z1.h
1663 ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z4.s
1664 ; CHECK-NEXT: ptrue p0.h
1665 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1666 ; CHECK-NEXT: uzp1 z0.h, z0.h, z3.h
1667 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
1670 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1671 %a = sdiv <vscale x 8 x i16> %x, %y
1672 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1673 ret <vscale x 8 x i16> %b
1676 define <vscale x 16 x i8> @sdiv_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1677 ; CHECK-LABEL: sdiv_nxv16i8_y:
1678 ; CHECK: // %bb.0: // %entry
1679 ; CHECK-NEXT: sunpkhi z3.h, z1.b
1680 ; CHECK-NEXT: sunpkhi z4.h, z0.b
1681 ; CHECK-NEXT: ptrue p0.s
1682 ; CHECK-NEXT: sunpklo z0.h, z0.b
1683 ; CHECK-NEXT: sunpkhi z5.s, z3.h
1684 ; CHECK-NEXT: sunpkhi z6.s, z4.h
1685 ; CHECK-NEXT: sunpklo z3.s, z3.h
1686 ; CHECK-NEXT: sunpklo z4.s, z4.h
1687 ; CHECK-NEXT: sunpkhi z7.s, z0.h
1688 ; CHECK-NEXT: sunpklo z0.s, z0.h
1689 ; CHECK-NEXT: sdivr z5.s, p0/m, z5.s, z6.s
1690 ; CHECK-NEXT: sdivr z3.s, p0/m, z3.s, z4.s
1691 ; CHECK-NEXT: sunpklo z4.h, z1.b
1692 ; CHECK-NEXT: sunpkhi z6.s, z4.h
1693 ; CHECK-NEXT: sunpklo z4.s, z4.h
1694 ; CHECK-NEXT: sdivr z6.s, p0/m, z6.s, z7.s
1695 ; CHECK-NEXT: uzp1 z3.h, z3.h, z5.h
1696 ; CHECK-NEXT: sdiv z0.s, p0/m, z0.s, z4.s
1697 ; CHECK-NEXT: ptrue p0.b
1698 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1699 ; CHECK-NEXT: uzp1 z0.h, z0.h, z6.h
1700 ; CHECK-NEXT: uzp1 z0.b, z0.b, z3.b
1701 ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b
1704 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1705 %a = sdiv <vscale x 16 x i8> %x, %y
1706 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1707 ret <vscale x 16 x i8> %b
1710 define <vscale x 2 x i64> @udiv_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1711 ; CHECK-LABEL: udiv_nxv2i64_y:
1712 ; CHECK: // %bb.0: // %entry
1713 ; CHECK-NEXT: ptrue p0.d
1714 ; CHECK-NEXT: udiv z0.d, p0/m, z0.d, z1.d
1715 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1716 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
1719 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1720 %a = udiv <vscale x 2 x i64> %x, %y
1721 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1722 ret <vscale x 2 x i64> %b
1725 define <vscale x 4 x i32> @udiv_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1726 ; CHECK-LABEL: udiv_nxv4i32_y:
1727 ; CHECK: // %bb.0: // %entry
1728 ; CHECK-NEXT: ptrue p0.s
1729 ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z1.s
1730 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1731 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
1734 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1735 %a = udiv <vscale x 4 x i32> %x, %y
1736 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1737 ret <vscale x 4 x i32> %b
1740 define <vscale x 8 x i16> @udiv_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1741 ; CHECK-LABEL: udiv_nxv8i16_y:
1742 ; CHECK: // %bb.0: // %entry
1743 ; CHECK-NEXT: ptrue p0.s
1744 ; CHECK-NEXT: uunpkhi z3.s, z1.h
1745 ; CHECK-NEXT: uunpkhi z4.s, z0.h
1746 ; CHECK-NEXT: uunpklo z0.s, z0.h
1747 ; CHECK-NEXT: udivr z3.s, p0/m, z3.s, z4.s
1748 ; CHECK-NEXT: uunpklo z4.s, z1.h
1749 ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z4.s
1750 ; CHECK-NEXT: ptrue p0.h
1751 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1752 ; CHECK-NEXT: uzp1 z0.h, z0.h, z3.h
1753 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
1756 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1757 %a = udiv <vscale x 8 x i16> %x, %y
1758 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1759 ret <vscale x 8 x i16> %b
1762 define <vscale x 16 x i8> @udiv_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1763 ; CHECK-LABEL: udiv_nxv16i8_y:
1764 ; CHECK: // %bb.0: // %entry
1765 ; CHECK-NEXT: uunpkhi z3.h, z1.b
1766 ; CHECK-NEXT: uunpkhi z4.h, z0.b
1767 ; CHECK-NEXT: ptrue p0.s
1768 ; CHECK-NEXT: uunpklo z0.h, z0.b
1769 ; CHECK-NEXT: uunpkhi z5.s, z3.h
1770 ; CHECK-NEXT: uunpkhi z6.s, z4.h
1771 ; CHECK-NEXT: uunpklo z3.s, z3.h
1772 ; CHECK-NEXT: uunpklo z4.s, z4.h
1773 ; CHECK-NEXT: uunpkhi z7.s, z0.h
1774 ; CHECK-NEXT: uunpklo z0.s, z0.h
1775 ; CHECK-NEXT: udivr z5.s, p0/m, z5.s, z6.s
1776 ; CHECK-NEXT: udivr z3.s, p0/m, z3.s, z4.s
1777 ; CHECK-NEXT: uunpklo z4.h, z1.b
1778 ; CHECK-NEXT: uunpkhi z6.s, z4.h
1779 ; CHECK-NEXT: uunpklo z4.s, z4.h
1780 ; CHECK-NEXT: udivr z6.s, p0/m, z6.s, z7.s
1781 ; CHECK-NEXT: uzp1 z3.h, z3.h, z5.h
1782 ; CHECK-NEXT: udiv z0.s, p0/m, z0.s, z4.s
1783 ; CHECK-NEXT: ptrue p0.b
1784 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1785 ; CHECK-NEXT: uzp1 z0.h, z0.h, z6.h
1786 ; CHECK-NEXT: uzp1 z0.b, z0.b, z3.b
1787 ; CHECK-NEXT: sel z0.b, p0, z0.b, z1.b
1790 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1791 %a = udiv <vscale x 16 x i8> %x, %y
1792 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1793 ret <vscale x 16 x i8> %b
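; There is no SVE integer remainder instruction; the srem/urem tests below expand to
; a divide (via movprfx in the i32/i64 cases) followed by an msb computing
; x - (x/y)*y, and the predication on the msb also performs the merge with %y.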
1796 define <vscale x 2 x i64> @srem_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1797 ; CHECK-LABEL: srem_nxv2i64_y:
1798 ; CHECK: // %bb.0: // %entry
1799 ; CHECK-NEXT: ptrue p0.d
1800 ; CHECK-NEXT: movprfx z3, z0
1801 ; CHECK-NEXT: sdiv z3.d, p0/m, z3.d, z1.d
1802 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1803 ; CHECK-NEXT: msb z1.d, p0/m, z3.d, z0.d
1804 ; CHECK-NEXT: mov z0.d, z1.d
1807 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1808 %a = srem <vscale x 2 x i64> %x, %y
1809 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1810 ret <vscale x 2 x i64> %b
1813 define <vscale x 4 x i32> @srem_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1814 ; CHECK-LABEL: srem_nxv4i32_y:
1815 ; CHECK: // %bb.0: // %entry
1816 ; CHECK-NEXT: ptrue p0.s
1817 ; CHECK-NEXT: movprfx z3, z0
1818 ; CHECK-NEXT: sdiv z3.s, p0/m, z3.s, z1.s
1819 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1820 ; CHECK-NEXT: msb z1.s, p0/m, z3.s, z0.s
1821 ; CHECK-NEXT: mov z0.d, z1.d
1824 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1825 %a = srem <vscale x 4 x i32> %x, %y
1826 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1827 ret <vscale x 4 x i32> %b
1830 define <vscale x 8 x i16> @srem_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1831 ; CHECK-LABEL: srem_nxv8i16_y:
1832 ; CHECK: // %bb.0: // %entry
1833 ; CHECK-NEXT: ptrue p0.s
1834 ; CHECK-NEXT: sunpkhi z3.s, z1.h
1835 ; CHECK-NEXT: sunpkhi z4.s, z0.h
1836 ; CHECK-NEXT: sunpklo z5.s, z0.h
1837 ; CHECK-NEXT: sdivr z3.s, p0/m, z3.s, z4.s
1838 ; CHECK-NEXT: sunpklo z4.s, z1.h
1839 ; CHECK-NEXT: sdivr z4.s, p0/m, z4.s, z5.s
1840 ; CHECK-NEXT: ptrue p0.h
1841 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1842 ; CHECK-NEXT: uzp1 z3.h, z4.h, z3.h
1843 ; CHECK-NEXT: msb z1.h, p0/m, z3.h, z0.h
1844 ; CHECK-NEXT: mov z0.d, z1.d
1847 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1848 %a = srem <vscale x 8 x i16> %x, %y
1849 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1850 ret <vscale x 8 x i16> %b
1853 define <vscale x 16 x i8> @srem_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1854 ; CHECK-LABEL: srem_nxv16i8_y:
1855 ; CHECK: // %bb.0: // %entry
1856 ; CHECK-NEXT: sunpkhi z3.h, z1.b
1857 ; CHECK-NEXT: sunpkhi z4.h, z0.b
1858 ; CHECK-NEXT: ptrue p0.s
1859 ; CHECK-NEXT: sunpkhi z5.s, z3.h
1860 ; CHECK-NEXT: sunpkhi z6.s, z4.h
1861 ; CHECK-NEXT: sunpklo z3.s, z3.h
1862 ; CHECK-NEXT: sunpklo z4.s, z4.h
1863 ; CHECK-NEXT: sdivr z5.s, p0/m, z5.s, z6.s
1864 ; CHECK-NEXT: sunpklo z6.h, z0.b
1865 ; CHECK-NEXT: sunpkhi z24.s, z6.h
1866 ; CHECK-NEXT: sunpklo z6.s, z6.h
1867 ; CHECK-NEXT: sdivr z3.s, p0/m, z3.s, z4.s
1868 ; CHECK-NEXT: sunpklo z4.h, z1.b
1869 ; CHECK-NEXT: sunpkhi z7.s, z4.h
1870 ; CHECK-NEXT: sunpklo z4.s, z4.h
1871 ; CHECK-NEXT: sdivr z7.s, p0/m, z7.s, z24.s
1872 ; CHECK-NEXT: uzp1 z3.h, z3.h, z5.h
1873 ; CHECK-NEXT: sdivr z4.s, p0/m, z4.s, z6.s
1874 ; CHECK-NEXT: ptrue p0.b
1875 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1876 ; CHECK-NEXT: uzp1 z4.h, z4.h, z7.h
1877 ; CHECK-NEXT: uzp1 z3.b, z4.b, z3.b
1878 ; CHECK-NEXT: msb z1.b, p0/m, z3.b, z0.b
1879 ; CHECK-NEXT: mov z0.d, z1.d
1882 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1883 %a = srem <vscale x 16 x i8> %x, %y
1884 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1885 ret <vscale x 16 x i8> %b
1888 define <vscale x 2 x i64> @urem_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1889 ; CHECK-LABEL: urem_nxv2i64_y:
1890 ; CHECK: // %bb.0: // %entry
1891 ; CHECK-NEXT: ptrue p0.d
1892 ; CHECK-NEXT: movprfx z3, z0
1893 ; CHECK-NEXT: udiv z3.d, p0/m, z3.d, z1.d
1894 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1895 ; CHECK-NEXT: msb z1.d, p0/m, z3.d, z0.d
1896 ; CHECK-NEXT: mov z0.d, z1.d
1899 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1900 %a = urem <vscale x 2 x i64> %x, %y
1901 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1902 ret <vscale x 2 x i64> %b
1905 define <vscale x 4 x i32> @urem_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1906 ; CHECK-LABEL: urem_nxv4i32_y:
1907 ; CHECK: // %bb.0: // %entry
1908 ; CHECK-NEXT: ptrue p0.s
1909 ; CHECK-NEXT: movprfx z3, z0
1910 ; CHECK-NEXT: udiv z3.s, p0/m, z3.s, z1.s
1911 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1912 ; CHECK-NEXT: msb z1.s, p0/m, z3.s, z0.s
1913 ; CHECK-NEXT: mov z0.d, z1.d
1916 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1917 %a = urem <vscale x 4 x i32> %x, %y
1918 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1919 ret <vscale x 4 x i32> %b
1922 define <vscale x 8 x i16> @urem_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1923 ; CHECK-LABEL: urem_nxv8i16_y:
1924 ; CHECK: // %bb.0: // %entry
1925 ; CHECK-NEXT: ptrue p0.s
1926 ; CHECK-NEXT: uunpkhi z3.s, z1.h
1927 ; CHECK-NEXT: uunpkhi z4.s, z0.h
1928 ; CHECK-NEXT: uunpklo z5.s, z0.h
1929 ; CHECK-NEXT: udivr z3.s, p0/m, z3.s, z4.s
1930 ; CHECK-NEXT: uunpklo z4.s, z1.h
1931 ; CHECK-NEXT: udivr z4.s, p0/m, z4.s, z5.s
1932 ; CHECK-NEXT: ptrue p0.h
1933 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1934 ; CHECK-NEXT: uzp1 z3.h, z4.h, z3.h
1935 ; CHECK-NEXT: msb z1.h, p0/m, z3.h, z0.h
1936 ; CHECK-NEXT: mov z0.d, z1.d
1939 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1940 %a = urem <vscale x 8 x i16> %x, %y
1941 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1942 ret <vscale x 8 x i16> %b
1945 define <vscale x 16 x i8> @urem_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1946 ; CHECK-LABEL: urem_nxv16i8_y:
1947 ; CHECK: // %bb.0: // %entry
1948 ; CHECK-NEXT: uunpkhi z3.h, z1.b
1949 ; CHECK-NEXT: uunpkhi z4.h, z0.b
1950 ; CHECK-NEXT: ptrue p0.s
1951 ; CHECK-NEXT: uunpkhi z5.s, z3.h
1952 ; CHECK-NEXT: uunpkhi z6.s, z4.h
1953 ; CHECK-NEXT: uunpklo z3.s, z3.h
1954 ; CHECK-NEXT: uunpklo z4.s, z4.h
1955 ; CHECK-NEXT: udivr z5.s, p0/m, z5.s, z6.s
1956 ; CHECK-NEXT: uunpklo z6.h, z0.b
1957 ; CHECK-NEXT: uunpkhi z24.s, z6.h
1958 ; CHECK-NEXT: uunpklo z6.s, z6.h
1959 ; CHECK-NEXT: udivr z3.s, p0/m, z3.s, z4.s
1960 ; CHECK-NEXT: uunpklo z4.h, z1.b
1961 ; CHECK-NEXT: uunpkhi z7.s, z4.h
1962 ; CHECK-NEXT: uunpklo z4.s, z4.h
1963 ; CHECK-NEXT: udivr z7.s, p0/m, z7.s, z24.s
1964 ; CHECK-NEXT: uzp1 z3.h, z3.h, z5.h
1965 ; CHECK-NEXT: udivr z4.s, p0/m, z4.s, z6.s
1966 ; CHECK-NEXT: ptrue p0.b
1967 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1968 ; CHECK-NEXT: uzp1 z4.h, z4.h, z7.h
1969 ; CHECK-NEXT: uzp1 z3.b, z4.b, z3.b
1970 ; CHECK-NEXT: msb z1.b, p0/m, z3.b, z0.b
1971 ; CHECK-NEXT: mov z0.d, z1.d
1974 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1975 %a = urem <vscale x 16 x i8> %x, %y
1976 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1977 ret <vscale x 16 x i8> %b
1980 define <vscale x 2 x i64> @and_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1981 ; CHECK-LABEL: and_nxv2i64_y:
1982 ; CHECK: // %bb.0: // %entry
1983 ; CHECK-NEXT: ptrue p0.d
1984 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1985 ; CHECK-NEXT: and z1.d, p0/m, z1.d, z0.d
1986 ; CHECK-NEXT: mov z0.d, z1.d
1989 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1990 %a = and <vscale x 2 x i64> %x, %y
1991 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1992 ret <vscale x 2 x i64> %b
1995 define <vscale x 4 x i32> @and_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1996 ; CHECK-LABEL: and_nxv4i32_y:
1997 ; CHECK: // %bb.0: // %entry
1998 ; CHECK-NEXT: ptrue p0.s
1999 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
2000 ; CHECK-NEXT: and z1.s, p0/m, z1.s, z0.s
2001 ; CHECK-NEXT: mov z0.d, z1.d
2004 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2005 %a = and <vscale x 4 x i32> %x, %y
2006 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2007 ret <vscale x 4 x i32> %b
2010 define <vscale x 8 x i16> @and_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2011 ; CHECK-LABEL: and_nxv8i16_y:
2012 ; CHECK: // %bb.0: // %entry
2013 ; CHECK-NEXT: ptrue p0.h
2014 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
2015 ; CHECK-NEXT: and z1.h, p0/m, z1.h, z0.h
2016 ; CHECK-NEXT: mov z0.d, z1.d
2019 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2020 %a = and <vscale x 8 x i16> %x, %y
2021 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2022 ret <vscale x 8 x i16> %b
2025 define <vscale x 16 x i8> @and_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2026 ; CHECK-LABEL: and_nxv16i8_y:
2027 ; CHECK: // %bb.0: // %entry
2028 ; CHECK-NEXT: ptrue p0.b
2029 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
2030 ; CHECK-NEXT: and z1.b, p0/m, z1.b, z0.b
2031 ; CHECK-NEXT: mov z0.d, z1.d
2034 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2035 %a = and <vscale x 16 x i8> %x, %y
2036 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2037 ret <vscale x 16 x i8> %b
2040 define <vscale x 2 x i64> @or_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
2041 ; CHECK-LABEL: or_nxv2i64_y:
2042 ; CHECK: // %bb.0: // %entry
2043 ; CHECK-NEXT: ptrue p0.d
2044 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
2045 ; CHECK-NEXT: orr z1.d, p0/m, z1.d, z0.d
2046 ; CHECK-NEXT: mov z0.d, z1.d
2049 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2050 %a = or <vscale x 2 x i64> %x, %y
2051 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2052 ret <vscale x 2 x i64> %b
2055 define <vscale x 4 x i32> @or_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
2056 ; CHECK-LABEL: or_nxv4i32_y:
2057 ; CHECK: // %bb.0: // %entry
2058 ; CHECK-NEXT: ptrue p0.s
2059 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
2060 ; CHECK-NEXT: orr z1.s, p0/m, z1.s, z0.s
2061 ; CHECK-NEXT: mov z0.d, z1.d
2064 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2065 %a = or <vscale x 4 x i32> %x, %y
2066 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2067 ret <vscale x 4 x i32> %b
2070 define <vscale x 8 x i16> @or_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2071 ; CHECK-LABEL: or_nxv8i16_y:
2072 ; CHECK: // %bb.0: // %entry
2073 ; CHECK-NEXT: ptrue p0.h
2074 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
2075 ; CHECK-NEXT: orr z1.h, p0/m, z1.h, z0.h
2076 ; CHECK-NEXT: mov z0.d, z1.d
2079 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2080 %a = or <vscale x 8 x i16> %x, %y
2081 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2082 ret <vscale x 8 x i16> %b
2085 define <vscale x 16 x i8> @or_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2086 ; CHECK-LABEL: or_nxv16i8_y:
2087 ; CHECK: // %bb.0: // %entry
2088 ; CHECK-NEXT: ptrue p0.b
2089 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
2090 ; CHECK-NEXT: orr z1.b, p0/m, z1.b, z0.b
2091 ; CHECK-NEXT: mov z0.d, z1.d
2094 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2095 %a = or <vscale x 16 x i8> %x, %y
2096 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2097 ret <vscale x 16 x i8> %b
2100 define <vscale x 2 x i64> @xor_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
2101 ; CHECK-LABEL: xor_nxv2i64_y:
2102 ; CHECK: // %bb.0: // %entry
2103 ; CHECK-NEXT: ptrue p0.d
2104 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
2105 ; CHECK-NEXT: eor z1.d, p0/m, z1.d, z0.d
2106 ; CHECK-NEXT: mov z0.d, z1.d
2109 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2110 %a = xor <vscale x 2 x i64> %x, %y
2111 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2112 ret <vscale x 2 x i64> %b
2115 define <vscale x 4 x i32> @xor_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
2116 ; CHECK-LABEL: xor_nxv4i32_y:
2117 ; CHECK: // %bb.0: // %entry
2118 ; CHECK-NEXT: ptrue p0.s
2119 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
2120 ; CHECK-NEXT: eor z1.s, p0/m, z1.s, z0.s
2121 ; CHECK-NEXT: mov z0.d, z1.d
2124 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2125 %a = xor <vscale x 4 x i32> %x, %y
2126 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2127 ret <vscale x 4 x i32> %b
2130 define <vscale x 8 x i16> @xor_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2131 ; CHECK-LABEL: xor_nxv8i16_y:
2132 ; CHECK: // %bb.0: // %entry
2133 ; CHECK-NEXT: ptrue p0.h
2134 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
2135 ; CHECK-NEXT: eor z1.h, p0/m, z1.h, z0.h
2136 ; CHECK-NEXT: mov z0.d, z1.d
2139 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2140 %a = xor <vscale x 8 x i16> %x, %y
2141 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2142 ret <vscale x 8 x i16> %b
2145 define <vscale x 16 x i8> @xor_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2146 ; CHECK-LABEL: xor_nxv16i8_y:
2147 ; CHECK: // %bb.0: // %entry
2148 ; CHECK-NEXT: ptrue p0.b
2149 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
2150 ; CHECK-NEXT: eor z1.b, p0/m, z1.b, z0.b
2151 ; CHECK-NEXT: mov z0.d, z1.d
2154 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2155 %a = xor <vscale x 16 x i8> %x, %y
2156 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2157 ret <vscale x 16 x i8> %b
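; For the shifts below the compare result is kept in p1 and an explicit sel against
; z1 finishes the select, while the shift itself runs under the all-true predicate;
; presumably this is because the destructive operand of the predicated shift holds
; %x rather than %y.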
2160 define <vscale x 2 x i64> @shl_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
2161 ; CHECK-LABEL: shl_nxv2i64_y:
2162 ; CHECK: // %bb.0: // %entry
2163 ; CHECK-NEXT: ptrue p0.d
2164 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
2165 ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d
2166 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
2169 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2170 %a = shl <vscale x 2 x i64> %x, %y
2171 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2172 ret <vscale x 2 x i64> %b
2175 define <vscale x 4 x i32> @shl_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
2176 ; CHECK-LABEL: shl_nxv4i32_y:
2177 ; CHECK: // %bb.0: // %entry
2178 ; CHECK-NEXT: ptrue p0.s
2179 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
2180 ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s
2181 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
2184 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2185 %a = shl <vscale x 4 x i32> %x, %y
2186 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2187 ret <vscale x 4 x i32> %b
2190 define <vscale x 8 x i16> @shl_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2191 ; CHECK-LABEL: shl_nxv8i16_y:
2192 ; CHECK: // %bb.0: // %entry
2193 ; CHECK-NEXT: ptrue p0.h
2194 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
2195 ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h
2196 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
2199 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2200 %a = shl <vscale x 8 x i16> %x, %y
2201 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2202 ret <vscale x 8 x i16> %b
2205 define <vscale x 16 x i8> @shl_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2206 ; CHECK-LABEL: shl_nxv16i8_y:
2207 ; CHECK: // %bb.0: // %entry
2208 ; CHECK-NEXT: ptrue p0.b
2209 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
2210 ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b
2211 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
2214 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2215 %a = shl <vscale x 16 x i8> %x, %y
2216 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2217 ret <vscale x 16 x i8> %b
2220 define <vscale x 2 x i64> @ashr_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
2221 ; CHECK-LABEL: ashr_nxv2i64_y:
2222 ; CHECK: // %bb.0: // %entry
2223 ; CHECK-NEXT: ptrue p0.d
2224 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
2225 ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d
2226 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
2229 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2230 %a = ashr <vscale x 2 x i64> %x, %y
2231 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2232 ret <vscale x 2 x i64> %b
2235 define <vscale x 4 x i32> @ashr_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
2236 ; CHECK-LABEL: ashr_nxv4i32_y:
2237 ; CHECK: // %bb.0: // %entry
2238 ; CHECK-NEXT: ptrue p0.s
2239 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
2240 ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s
2241 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
2244 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2245 %a = ashr <vscale x 4 x i32> %x, %y
2246 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2247 ret <vscale x 4 x i32> %b
2250 define <vscale x 8 x i16> @ashr_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2251 ; CHECK-LABEL: ashr_nxv8i16_y:
2252 ; CHECK: // %bb.0: // %entry
2253 ; CHECK-NEXT: ptrue p0.h
2254 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
2255 ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h
2256 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
2259 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2260 %a = ashr <vscale x 8 x i16> %x, %y
2261 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2262 ret <vscale x 8 x i16> %b
2265 define <vscale x 16 x i8> @ashr_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2266 ; CHECK-LABEL: ashr_nxv16i8_y:
2267 ; CHECK: // %bb.0: // %entry
2268 ; CHECK-NEXT: ptrue p0.b
2269 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
2270 ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b
2271 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
2274 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2275 %a = ashr <vscale x 16 x i8> %x, %y
2276 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2277 ret <vscale x 16 x i8> %b
2280 define <vscale x 2 x i64> @lshr_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
2281 ; CHECK-LABEL: lshr_nxv2i64_y:
2282 ; CHECK: // %bb.0: // %entry
2283 ; CHECK-NEXT: ptrue p0.d
2284 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
2285 ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
2286 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
2289 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2290 %a = lshr <vscale x 2 x i64> %x, %y
2291 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2292 ret <vscale x 2 x i64> %b
2295 define <vscale x 4 x i32> @lshr_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
2296 ; CHECK-LABEL: lshr_nxv4i32_y:
2297 ; CHECK: // %bb.0: // %entry
2298 ; CHECK-NEXT: ptrue p0.s
2299 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
2300 ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s
2301 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
2304 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2305 %a = lshr <vscale x 4 x i32> %x, %y
2306 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2307 ret <vscale x 4 x i32> %b
2310 define <vscale x 8 x i16> @lshr_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
2311 ; CHECK-LABEL: lshr_nxv8i16_y:
2312 ; CHECK: // %bb.0: // %entry
2313 ; CHECK-NEXT: ptrue p0.h
2314 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
2315 ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h
2316 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
2319 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2320 %a = lshr <vscale x 8 x i16> %x, %y
2321 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2322 ret <vscale x 8 x i16> %b
2325 define <vscale x 16 x i8> @lshr_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
2326 ; CHECK-LABEL: lshr_nxv16i8_y:
2327 ; CHECK: // %bb.0: // %entry
2328 ; CHECK-NEXT: ptrue p0.b
2329 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
2330 ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b
2331 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
2334 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2335 %a = lshr <vscale x 16 x i8> %x, %y
2336 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2337 ret <vscale x 16 x i8> %b
2340 define <vscale x 2 x i64> @mla_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
2341 ; CHECK-LABEL: mla_nxv2i64_y:
2342 ; CHECK: // %bb.0: // %entry
2343 ; CHECK-NEXT: ptrue p0.d
2344 ; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
2345 ; CHECK-NEXT: mad z1.d, p0/m, z2.d, z0.d
2346 ; CHECK-NEXT: mov z0.d, z1.d
2349 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2350 %m = mul <vscale x 2 x i64> %y, %z
2351 %a = add <vscale x 2 x i64> %x, %m
2352 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2353 ret <vscale x 2 x i64> %b
2356 define <vscale x 4 x i32> @mla_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
2357 ; CHECK-LABEL: mla_nxv4i32_y:
2358 ; CHECK: // %bb.0: // %entry
2359 ; CHECK-NEXT: ptrue p0.s
2360 ; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
2361 ; CHECK-NEXT: mad z1.s, p0/m, z2.s, z0.s
2362 ; CHECK-NEXT: mov z0.d, z1.d
2365 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2366 %m = mul <vscale x 4 x i32> %y, %z
2367 %a = add <vscale x 4 x i32> %x, %m
2368 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2369 ret <vscale x 4 x i32> %b
2372 define <vscale x 8 x i16> @mla_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
2373 ; CHECK-LABEL: mla_nxv8i16_y:
2374 ; CHECK: // %bb.0: // %entry
2375 ; CHECK-NEXT: ptrue p0.h
2376 ; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
2377 ; CHECK-NEXT: mad z1.h, p0/m, z2.h, z0.h
2378 ; CHECK-NEXT: mov z0.d, z1.d
2381 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2382 %m = mul <vscale x 8 x i16> %y, %z
2383 %a = add <vscale x 8 x i16> %x, %m
2384 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2385 ret <vscale x 8 x i16> %b
2388 define <vscale x 16 x i8> @mla_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
2389 ; CHECK-LABEL: mla_nxv16i8_y:
2390 ; CHECK: // %bb.0: // %entry
2391 ; CHECK-NEXT: ptrue p0.b
2392 ; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
2393 ; CHECK-NEXT: mad z1.b, p0/m, z2.b, z0.b
2394 ; CHECK-NEXT: mov z0.d, z1.d
2397 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2398 %m = mul <vscale x 16 x i8> %y, %z
2399 %a = add <vscale x 16 x i8> %x, %m
2400 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2401 ret <vscale x 16 x i8> %b
2404 define <vscale x 2 x i64> @mls_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
2405 ; CHECK-LABEL: mls_nxv2i64_y:
2406 ; CHECK: // %bb.0: // %entry
2407 ; CHECK-NEXT: ptrue p0.d
2408 ; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
2409 ; CHECK-NEXT: msb z1.d, p0/m, z0.d, z2.d
2410 ; CHECK-NEXT: mov z0.d, z1.d
2413 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
2414 %m = mul <vscale x 2 x i64> %x, %y
2415 %a = sub <vscale x 2 x i64> %z, %m
2416 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
2417 ret <vscale x 2 x i64> %b
2420 define <vscale x 4 x i32> @mls_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
2421 ; CHECK-LABEL: mls_nxv4i32_y:
2422 ; CHECK: // %bb.0: // %entry
2423 ; CHECK-NEXT: ptrue p0.s
2424 ; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
2425 ; CHECK-NEXT: msb z1.s, p0/m, z0.s, z2.s
2426 ; CHECK-NEXT: mov z0.d, z1.d
2429 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2430 %m = mul <vscale x 4 x i32> %x, %y
2431 %a = sub <vscale x 4 x i32> %z, %m
2432 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
2433 ret <vscale x 4 x i32> %b
2436 define <vscale x 8 x i16> @mls_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
2437 ; CHECK-LABEL: mls_nxv8i16_y:
2438 ; CHECK: // %bb.0: // %entry
2439 ; CHECK-NEXT: ptrue p0.h
2440 ; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
2441 ; CHECK-NEXT: msb z1.h, p0/m, z0.h, z2.h
2442 ; CHECK-NEXT: mov z0.d, z1.d
2445 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
2446 %m = mul <vscale x 8 x i16> %x, %y
2447 %a = sub <vscale x 8 x i16> %z, %m
2448 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
2449 ret <vscale x 8 x i16> %b
2452 define <vscale x 16 x i8> @mls_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
2453 ; CHECK-LABEL: mls_nxv16i8_y:
2454 ; CHECK: // %bb.0: // %entry
2455 ; CHECK-NEXT: ptrue p0.b
2456 ; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
2457 ; CHECK-NEXT: msb z1.b, p0/m, z0.b, z2.b
2458 ; CHECK-NEXT: mov z0.d, z1.d
2461 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
2462 %m = mul <vscale x 16 x i8> %x, %y
2463 %a = sub <vscale x 16 x i8> %z, %m
2464 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
2465 ret <vscale x 16 x i8> %b
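; The floating-point tests compare %n unordered-greater-than zero. There is no
; direct SVE predicate compare for that condition, so the CHECK lines expect an
; fcmle against #0.0 whose result is then inverted with a predicated not.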
2468 define <vscale x 4 x float> @fadd_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2469 ; CHECK-LABEL: fadd_nxv4f32_y:
2470 ; CHECK: // %bb.0: // %entry
2471 ; CHECK-NEXT: ptrue p0.s
2472 ; CHECK-NEXT: fadd z0.s, z0.s, z1.s
2473 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2474 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2475 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
2478 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2479 %a = fadd <vscale x 4 x float> %x, %y
2480 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2481 ret <vscale x 4 x float> %b
2484 define <vscale x 8 x half> @fadd_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2485 ; CHECK-LABEL: fadd_nxv8f16_y:
2486 ; CHECK: // %bb.0: // %entry
2487 ; CHECK-NEXT: ptrue p0.h
2488 ; CHECK-NEXT: fadd z0.h, z0.h, z1.h
2489 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2490 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2491 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
2494 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2495 %a = fadd <vscale x 8 x half> %x, %y
2496 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2497 ret <vscale x 8 x half> %b
2500 define <vscale x 2 x double> @fadd_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2501 ; CHECK-LABEL: fadd_nxv2f64_y:
2502 ; CHECK: // %bb.0: // %entry
2503 ; CHECK-NEXT: ptrue p0.d
2504 ; CHECK-NEXT: fadd z0.d, z0.d, z1.d
2505 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2506 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2507 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
2510 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2511 %a = fadd <vscale x 2 x double> %x, %y
2512 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2513 ret <vscale x 2 x double> %b
2516 define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2517 ; CHECK-LABEL: fsub_nxv4f32_y:
2518 ; CHECK: // %bb.0: // %entry
2519 ; CHECK-NEXT: ptrue p0.s
2520 ; CHECK-NEXT: fsub z0.s, z0.s, z1.s
2521 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2522 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2523 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
2526 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2527 %a = fsub <vscale x 4 x float> %x, %y
2528 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2529 ret <vscale x 4 x float> %b
2532 define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2533 ; CHECK-LABEL: fsub_nxv8f16_y:
2534 ; CHECK: // %bb.0: // %entry
2535 ; CHECK-NEXT: ptrue p0.h
2536 ; CHECK-NEXT: fsub z0.h, z0.h, z1.h
2537 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2538 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2539 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
2542 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2543 %a = fsub <vscale x 8 x half> %x, %y
2544 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2545 ret <vscale x 8 x half> %b
2548 define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2549 ; CHECK-LABEL: fsub_nxv2f64_y:
2550 ; CHECK: // %bb.0: // %entry
2551 ; CHECK-NEXT: ptrue p0.d
2552 ; CHECK-NEXT: fsub z0.d, z0.d, z1.d
2553 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2554 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2555 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
2558 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2559 %a = fsub <vscale x 2 x double> %x, %y
2560 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2561 ret <vscale x 2 x double> %b
2564 define <vscale x 4 x float> @fmul_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2565 ; CHECK-LABEL: fmul_nxv4f32_y:
2566 ; CHECK: // %bb.0: // %entry
2567 ; CHECK-NEXT: ptrue p0.s
2568 ; CHECK-NEXT: fmul z0.s, z0.s, z1.s
2569 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2570 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2571 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
2574 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2575 %a = fmul <vscale x 4 x float> %x, %y
2576 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2577 ret <vscale x 4 x float> %b
2580 define <vscale x 8 x half> @fmul_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2581 ; CHECK-LABEL: fmul_nxv8f16_y:
2582 ; CHECK: // %bb.0: // %entry
2583 ; CHECK-NEXT: ptrue p0.h
2584 ; CHECK-NEXT: fmul z0.h, z0.h, z1.h
2585 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2586 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2587 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
2590 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2591 %a = fmul <vscale x 8 x half> %x, %y
2592 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2593 ret <vscale x 8 x half> %b
2596 define <vscale x 2 x double> @fmul_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2597 ; CHECK-LABEL: fmul_nxv2f64_y:
2598 ; CHECK: // %bb.0: // %entry
2599 ; CHECK-NEXT: ptrue p0.d
2600 ; CHECK-NEXT: fmul z0.d, z0.d, z1.d
2601 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2602 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2603 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
2606 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2607 %a = fmul <vscale x 2 x double> %x, %y
2608 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2609 ret <vscale x 2 x double> %b
2612 define <vscale x 4 x float> @fdiv_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2613 ; CHECK-LABEL: fdiv_nxv4f32_y:
2614 ; CHECK: // %bb.0: // %entry
2615 ; CHECK-NEXT: ptrue p0.s
2616 ; CHECK-NEXT: fdiv z0.s, p0/m, z0.s, z1.s
2617 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2618 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2619 ; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
2622 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2623 %a = fdiv <vscale x 4 x float> %x, %y
2624 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2625 ret <vscale x 4 x float> %b
2628 define <vscale x 8 x half> @fdiv_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2629 ; CHECK-LABEL: fdiv_nxv8f16_y:
2630 ; CHECK: // %bb.0: // %entry
2631 ; CHECK-NEXT: ptrue p0.h
2632 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2633 ; CHECK-NEXT: fdiv z0.h, p0/m, z0.h, z1.h
2634 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2635 ; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
2638 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2639 %a = fdiv <vscale x 8 x half> %x, %y
2640 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2641 ret <vscale x 8 x half> %b
2644 define <vscale x 2 x double> @fdiv_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2645 ; CHECK-LABEL: fdiv_nxv2f64_y:
2646 ; CHECK: // %bb.0: // %entry
2647 ; CHECK-NEXT: ptrue p0.d
2648 ; CHECK-NEXT: fdiv z0.d, p0/m, z0.d, z1.d
2649 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2650 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2651 ; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
2654 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2655 %a = fdiv <vscale x 2 x double> %x, %y
2656 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2657 ret <vscale x 2 x double> %b
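; The minnum/maxnum and minimum/maximum intrinsics below map onto the merging
; fminnm/fmaxnm and fmin/fmax instructions with the operands reversed (z1, z0), so
; the inactive lanes keep %y and only a "mov z0.d, z1.d" is needed afterwards.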
2660 define <vscale x 4 x float> @minnum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2661 ; CHECK-LABEL: minnum_nxv4f32_y:
2662 ; CHECK: // %bb.0: // %entry
2663 ; CHECK-NEXT: ptrue p0.s
2664 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2665 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2666 ; CHECK-NEXT: fminnm z1.s, p0/m, z1.s, z0.s
2667 ; CHECK-NEXT: mov z0.d, z1.d
2670 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2671 %a = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
2672 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2673 ret <vscale x 4 x float> %b
2676 define <vscale x 8 x half> @minnum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2677 ; CHECK-LABEL: minnum_nxv8f16_y:
2678 ; CHECK: // %bb.0: // %entry
2679 ; CHECK-NEXT: ptrue p0.h
2680 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2681 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2682 ; CHECK-NEXT: fminnm z1.h, p0/m, z1.h, z0.h
2683 ; CHECK-NEXT: mov z0.d, z1.d
2686 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2687 %a = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
2688 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2689 ret <vscale x 8 x half> %b
2692 define <vscale x 2 x double> @minnum_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2693 ; CHECK-LABEL: minnum_nxv2f64_y:
2694 ; CHECK: // %bb.0: // %entry
2695 ; CHECK-NEXT: ptrue p0.d
2696 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2697 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2698 ; CHECK-NEXT: fminnm z1.d, p0/m, z1.d, z0.d
2699 ; CHECK-NEXT: mov z0.d, z1.d
2702 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2703 %a = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
2704 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2705 ret <vscale x 2 x double> %b
2708 define <vscale x 4 x float> @maxnum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2709 ; CHECK-LABEL: maxnum_nxv4f32_y:
2710 ; CHECK: // %bb.0: // %entry
2711 ; CHECK-NEXT: ptrue p0.s
2712 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2713 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2714 ; CHECK-NEXT: fmaxnm z1.s, p0/m, z1.s, z0.s
2715 ; CHECK-NEXT: mov z0.d, z1.d
2718 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2719 %a = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
2720 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2721 ret <vscale x 4 x float> %b
2724 define <vscale x 8 x half> @maxnum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2725 ; CHECK-LABEL: maxnum_nxv8f16_y:
2726 ; CHECK: // %bb.0: // %entry
2727 ; CHECK-NEXT: ptrue p0.h
2728 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2729 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2730 ; CHECK-NEXT: fmaxnm z1.h, p0/m, z1.h, z0.h
2731 ; CHECK-NEXT: mov z0.d, z1.d
2734 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2735 %a = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
2736 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2737 ret <vscale x 8 x half> %b
2740 define <vscale x 2 x double> @maxnum_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2741 ; CHECK-LABEL: maxnum_nxv2f64_y:
2742 ; CHECK: // %bb.0: // %entry
2743 ; CHECK-NEXT: ptrue p0.d
2744 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2745 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2746 ; CHECK-NEXT: fmaxnm z1.d, p0/m, z1.d, z0.d
2747 ; CHECK-NEXT: mov z0.d, z1.d
2750 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2751 %a = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
2752 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2753 ret <vscale x 2 x double> %b
2756 define <vscale x 4 x float> @minimum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2757 ; CHECK-LABEL: minimum_nxv4f32_y:
2758 ; CHECK: // %bb.0: // %entry
2759 ; CHECK-NEXT: ptrue p0.s
2760 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2761 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2762 ; CHECK-NEXT: fmin z1.s, p0/m, z1.s, z0.s
2763 ; CHECK-NEXT: mov z0.d, z1.d
2766 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2767 %a = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
2768 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2769 ret <vscale x 4 x float> %b
2772 define <vscale x 8 x half> @minimum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2773 ; CHECK-LABEL: minimum_nxv8f16_y:
2774 ; CHECK: // %bb.0: // %entry
2775 ; CHECK-NEXT: ptrue p0.h
2776 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2777 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2778 ; CHECK-NEXT: fmin z1.h, p0/m, z1.h, z0.h
2779 ; CHECK-NEXT: mov z0.d, z1.d
2782 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2783 %a = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
2784 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2785 ret <vscale x 8 x half> %b
2788 define <vscale x 2 x double> @minimum_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2789 ; CHECK-LABEL: minimum_nxv2f64_y:
2790 ; CHECK: // %bb.0: // %entry
2791 ; CHECK-NEXT: ptrue p0.d
2792 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2793 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2794 ; CHECK-NEXT: fmin z1.d, p0/m, z1.d, z0.d
2795 ; CHECK-NEXT: mov z0.d, z1.d
2798 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2799 %a = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
2800 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2801 ret <vscale x 2 x double> %b
2804 define <vscale x 4 x float> @maximum_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
2805 ; CHECK-LABEL: maximum_nxv4f32_y:
2806 ; CHECK: // %bb.0: // %entry
2807 ; CHECK-NEXT: ptrue p0.s
2808 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
2809 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2810 ; CHECK-NEXT: fmax z1.s, p0/m, z1.s, z0.s
2811 ; CHECK-NEXT: mov z0.d, z1.d
2814 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2815 %a = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y)
2816 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2817 ret <vscale x 4 x float> %b
2820 define <vscale x 8 x half> @maximum_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
2821 ; CHECK-LABEL: maximum_nxv8f16_y:
2822 ; CHECK: // %bb.0: // %entry
2823 ; CHECK-NEXT: ptrue p0.h
2824 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
2825 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2826 ; CHECK-NEXT: fmax z1.h, p0/m, z1.h, z0.h
2827 ; CHECK-NEXT: mov z0.d, z1.d
2830 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2831 %a = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %y)
2832 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2833 ret <vscale x 8 x half> %b
2836 define <vscale x 2 x double> @maximum_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
2837 ; CHECK-LABEL: maximum_nxv2f64_y:
2838 ; CHECK: // %bb.0: // %entry
2839 ; CHECK-NEXT: ptrue p0.d
2840 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
2841 ; CHECK-NEXT: not p0.b, p0/z, p1.b
2842 ; CHECK-NEXT: fmax z1.d, p0/m, z1.d, z0.d
2843 ; CHECK-NEXT: mov z0.d, z1.d
2846 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2847 %a = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y)
2848 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2849 ret <vscale x 2 x double> %b
2852 define <vscale x 4 x float> @fmai_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
2853 ; CHECK-LABEL: fmai_nxv4f32_y:
2854 ; CHECK: // %bb.0: // %entry
2855 ; CHECK-NEXT: ptrue p0.s
2856 ; CHECK-NEXT: fcmle p1.s, p0/z, z3.s, #0.0
2857 ; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
2858 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2859 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
2862 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2863 %a = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %x)
2864 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2865 ret <vscale x 4 x float> %b
2868 define <vscale x 8 x half> @fmai_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
2869 ; CHECK-LABEL: fmai_nxv8f16_y:
2870 ; CHECK: // %bb.0: // %entry
2871 ; CHECK-NEXT: ptrue p0.h
2872 ; CHECK-NEXT: fcmle p1.h, p0/z, z3.h, #0.0
2873 ; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
2874 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2875 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
2878 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2879 %a = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %x)
2880 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2881 ret <vscale x 8 x half> %b
2884 define <vscale x 2 x double> @fmai_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
2885 ; CHECK-LABEL: fmai_nxv2f64_y:
2886 ; CHECK: // %bb.0: // %entry
2887 ; CHECK-NEXT: ptrue p0.d
2888 ; CHECK-NEXT: fcmle p1.d, p0/z, z3.d, #0.0
2889 ; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
2890 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2891 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
2894 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2895 %a = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %x)
2896 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2897 ret <vscale x 2 x double> %b
2900 define <vscale x 4 x float> @fma_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
2901 ; CHECK-LABEL: fma_nxv4f32_y:
2902 ; CHECK: // %bb.0: // %entry
2903 ; CHECK-NEXT: ptrue p0.s
2904 ; CHECK-NEXT: fcmle p1.s, p0/z, z3.s, #0.0
2905 ; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
2906 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2907 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
2910 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
2911 %m = fmul fast <vscale x 4 x float> %y, %z
2912 %a = fadd fast <vscale x 4 x float> %m, %x
2913 %b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
2914 ret <vscale x 4 x float> %b
2917 define <vscale x 8 x half> @fma_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
2918 ; CHECK-LABEL: fma_nxv8f16_y:
2919 ; CHECK: // %bb.0: // %entry
2920 ; CHECK-NEXT: ptrue p0.h
2921 ; CHECK-NEXT: fcmle p1.h, p0/z, z3.h, #0.0
2922 ; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
2923 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2924 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
2927 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
2928 %m = fmul fast <vscale x 8 x half> %y, %z
2929 %a = fadd fast <vscale x 8 x half> %m, %x
2930 %b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
2931 ret <vscale x 8 x half> %b
2934 define <vscale x 2 x double> @fma_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
2935 ; CHECK-LABEL: fma_nxv2f64_y:
2936 ; CHECK: // %bb.0: // %entry
2937 ; CHECK-NEXT: ptrue p0.d
2938 ; CHECK-NEXT: fcmle p1.d, p0/z, z3.d, #0.0
2939 ; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
2940 ; CHECK-NEXT: not p1.b, p0/z, p1.b
2941 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
2944 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
2945 %m = fmul fast <vscale x 2 x double> %y, %z
2946 %a = fadd fast <vscale x 2 x double> %m, %x
2947 %b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
2948 ret <vscale x 2 x double> %b
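; In mul_use_nxv4i32_x the multiply result has a second use (it is stored), so it is
; not rewritten into a merging predicated mul; the expected code keeps the
; unpredicated mul, stores it, and performs the select with a predicated mov.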
2952 define <vscale x 4 x i32> @mul_use_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n, ptr %p) {
2953 ; CHECK-LABEL: mul_use_nxv4i32_x:
2954 ; CHECK: // %bb.0: // %entry
2955 ; CHECK-NEXT: ptrue p0.s
2956 ; CHECK-NEXT: mul z1.s, z0.s, z1.s
2957 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
2958 ; CHECK-NEXT: st1w { z1.s }, p0, [x0]
2959 ; CHECK-NEXT: mov z0.s, p1/m, z1.s
2962 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
2963 %a = mul <vscale x 4 x i32> %x, %y
2964 store <vscale x 4 x i32> %a, ptr %p
2965 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
2966 ret <vscale x 4 x i32> %b
2969 declare <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
2970 declare <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
2971 declare <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
2972 declare <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
2973 declare <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
2974 declare <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
2975 declare <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
2976 declare <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
2977 declare <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
2978 declare <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
2979 declare <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
2980 declare <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
2981 declare <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
2982 declare <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
2983 declare <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)