1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc -mtriple=aarch64 -mattr=+sve2 -verify-machineinstrs %s -o - | FileCheck %s
4 define <vscale x 2 x i64> @add_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
5 ; CHECK-LABEL: add_nxv2i64_x:
6 ; CHECK: // %bb.0: // %entry
7 ; CHECK-NEXT: ptrue p0.d
8 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
9 ; CHECK-NEXT: add z0.d, p0/m, z0.d, z1.d
12 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
13 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
14 %b = add <vscale x 2 x i64> %a, %x
15 ret <vscale x 2 x i64> %b
18 define <vscale x 4 x i32> @add_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
19 ; CHECK-LABEL: add_nxv4i32_x:
20 ; CHECK: // %bb.0: // %entry
21 ; CHECK-NEXT: ptrue p0.s
22 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
23 ; CHECK-NEXT: add z0.s, p0/m, z0.s, z1.s
26 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
27 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
28 %b = add <vscale x 4 x i32> %a, %x
29 ret <vscale x 4 x i32> %b
32 define <vscale x 8 x i16> @add_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
33 ; CHECK-LABEL: add_nxv8i16_x:
34 ; CHECK: // %bb.0: // %entry
35 ; CHECK-NEXT: ptrue p0.h
36 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
37 ; CHECK-NEXT: add z0.h, p0/m, z0.h, z1.h
40 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
41 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
42 %b = add <vscale x 8 x i16> %a, %x
43 ret <vscale x 8 x i16> %b
46 define <vscale x 16 x i8> @add_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
47 ; CHECK-LABEL: add_nxv16i8_x:
48 ; CHECK: // %bb.0: // %entry
49 ; CHECK-NEXT: ptrue p0.b
50 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
51 ; CHECK-NEXT: add z0.b, p0/m, z0.b, z1.b
54 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
55 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
56 %b = add <vscale x 16 x i8> %a, %x
57 ret <vscale x 16 x i8> %b
60 define <vscale x 2 x i64> @sub_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
61 ; CHECK-LABEL: sub_nxv2i64_x:
62 ; CHECK: // %bb.0: // %entry
63 ; CHECK-NEXT: ptrue p0.d
64 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
65 ; CHECK-NEXT: sub z0.d, p0/m, z0.d, z1.d
68 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
69 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
70 %b = sub <vscale x 2 x i64> %x, %a
71 ret <vscale x 2 x i64> %b
74 define <vscale x 4 x i32> @sub_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
75 ; CHECK-LABEL: sub_nxv4i32_x:
76 ; CHECK: // %bb.0: // %entry
77 ; CHECK-NEXT: ptrue p0.s
78 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
79 ; CHECK-NEXT: sub z0.s, p0/m, z0.s, z1.s
82 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
83 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
84 %b = sub <vscale x 4 x i32> %x, %a
85 ret <vscale x 4 x i32> %b
88 define <vscale x 8 x i16> @sub_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
89 ; CHECK-LABEL: sub_nxv8i16_x:
90 ; CHECK: // %bb.0: // %entry
91 ; CHECK-NEXT: ptrue p0.h
92 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
93 ; CHECK-NEXT: sub z0.h, p0/m, z0.h, z1.h
96 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
97 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
98 %b = sub <vscale x 8 x i16> %x, %a
99 ret <vscale x 8 x i16> %b
102 define <vscale x 16 x i8> @sub_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
103 ; CHECK-LABEL: sub_nxv16i8_x:
104 ; CHECK: // %bb.0: // %entry
105 ; CHECK-NEXT: ptrue p0.b
106 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
107 ; CHECK-NEXT: sub z0.b, p0/m, z0.b, z1.b
110 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
111 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
112 %b = sub <vscale x 16 x i8> %x, %a
113 ret <vscale x 16 x i8> %b
116 define <vscale x 2 x i64> @mul_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
117 ; CHECK-LABEL: mul_nxv2i64_x:
118 ; CHECK: // %bb.0: // %entry
119 ; CHECK-NEXT: ptrue p0.d
120 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
121 ; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
124 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
125 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> splat (i64 1)
126 %b = mul <vscale x 2 x i64> %a, %x
127 ret <vscale x 2 x i64> %b
130 define <vscale x 4 x i32> @mul_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
131 ; CHECK-LABEL: mul_nxv4i32_x:
132 ; CHECK: // %bb.0: // %entry
133 ; CHECK-NEXT: ptrue p0.s
134 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
135 ; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
138 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
139 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> splat (i32 1)
140 %b = mul <vscale x 4 x i32> %a, %x
141 ret <vscale x 4 x i32> %b
144 define <vscale x 8 x i16> @mul_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
145 ; CHECK-LABEL: mul_nxv8i16_x:
146 ; CHECK: // %bb.0: // %entry
147 ; CHECK-NEXT: ptrue p0.h
148 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
149 ; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h
152 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
153 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> splat (i16 1)
154 %b = mul <vscale x 8 x i16> %a, %x
155 ret <vscale x 8 x i16> %b
158 define <vscale x 16 x i8> @mul_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
159 ; CHECK-LABEL: mul_nxv16i8_x:
160 ; CHECK: // %bb.0: // %entry
161 ; CHECK-NEXT: ptrue p0.b
162 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
163 ; CHECK-NEXT: mul z0.b, p0/m, z0.b, z1.b
166 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
167 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> splat (i8 1)
168 %b = mul <vscale x 16 x i8> %a, %x
169 ret <vscale x 16 x i8> %b
172 define <vscale x 2 x i64> @and_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
173 ; CHECK-LABEL: and_nxv2i64_x:
174 ; CHECK: // %bb.0: // %entry
175 ; CHECK-NEXT: ptrue p0.d
176 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
177 ; CHECK-NEXT: and z0.d, p0/m, z0.d, z1.d
180 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
181 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> splat (i64 -1)
182 %b = and <vscale x 2 x i64> %a, %x
183 ret <vscale x 2 x i64> %b
186 define <vscale x 4 x i32> @and_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
187 ; CHECK-LABEL: and_nxv4i32_x:
188 ; CHECK: // %bb.0: // %entry
189 ; CHECK-NEXT: ptrue p0.s
190 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
191 ; CHECK-NEXT: and z0.s, p0/m, z0.s, z1.s
194 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
195 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> splat (i32 -1)
196 %b = and <vscale x 4 x i32> %a, %x
197 ret <vscale x 4 x i32> %b
200 define <vscale x 8 x i16> @and_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
201 ; CHECK-LABEL: and_nxv8i16_x:
202 ; CHECK: // %bb.0: // %entry
203 ; CHECK-NEXT: ptrue p0.h
204 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
205 ; CHECK-NEXT: and z0.h, p0/m, z0.h, z1.h
208 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
209 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> splat (i16 -1)
210 %b = and <vscale x 8 x i16> %a, %x
211 ret <vscale x 8 x i16> %b
214 define <vscale x 16 x i8> @and_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
215 ; CHECK-LABEL: and_nxv16i8_x:
216 ; CHECK: // %bb.0: // %entry
217 ; CHECK-NEXT: ptrue p0.b
218 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
219 ; CHECK-NEXT: and z0.b, p0/m, z0.b, z1.b
222 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
223 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> splat (i8 -1)
224 %b = and <vscale x 16 x i8> %a, %x
225 ret <vscale x 16 x i8> %b
228 define <vscale x 2 x i64> @or_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
229 ; CHECK-LABEL: or_nxv2i64_x:
230 ; CHECK: // %bb.0: // %entry
231 ; CHECK-NEXT: ptrue p0.d
232 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
233 ; CHECK-NEXT: orr z0.d, p0/m, z0.d, z1.d
236 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
237 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
238 %b = or <vscale x 2 x i64> %a, %x
239 ret <vscale x 2 x i64> %b
242 define <vscale x 4 x i32> @or_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
243 ; CHECK-LABEL: or_nxv4i32_x:
244 ; CHECK: // %bb.0: // %entry
245 ; CHECK-NEXT: ptrue p0.s
246 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
247 ; CHECK-NEXT: orr z0.s, p0/m, z0.s, z1.s
250 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
251 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
252 %b = or <vscale x 4 x i32> %a, %x
253 ret <vscale x 4 x i32> %b
256 define <vscale x 8 x i16> @or_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
257 ; CHECK-LABEL: or_nxv8i16_x:
258 ; CHECK: // %bb.0: // %entry
259 ; CHECK-NEXT: ptrue p0.h
260 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
261 ; CHECK-NEXT: orr z0.h, p0/m, z0.h, z1.h
264 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
265 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
266 %b = or <vscale x 8 x i16> %a, %x
267 ret <vscale x 8 x i16> %b
270 define <vscale x 16 x i8> @or_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
271 ; CHECK-LABEL: or_nxv16i8_x:
272 ; CHECK: // %bb.0: // %entry
273 ; CHECK-NEXT: ptrue p0.b
274 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
275 ; CHECK-NEXT: orr z0.b, p0/m, z0.b, z1.b
278 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
279 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
280 %b = or <vscale x 16 x i8> %a, %x
281 ret <vscale x 16 x i8> %b
284 define <vscale x 2 x i64> @xor_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
285 ; CHECK-LABEL: xor_nxv2i64_x:
286 ; CHECK: // %bb.0: // %entry
287 ; CHECK-NEXT: ptrue p0.d
288 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
289 ; CHECK-NEXT: eor z0.d, p0/m, z0.d, z1.d
292 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
293 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
294 %b = xor <vscale x 2 x i64> %a, %x
295 ret <vscale x 2 x i64> %b
298 define <vscale x 4 x i32> @xor_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
299 ; CHECK-LABEL: xor_nxv4i32_x:
300 ; CHECK: // %bb.0: // %entry
301 ; CHECK-NEXT: ptrue p0.s
302 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
303 ; CHECK-NEXT: eor z0.s, p0/m, z0.s, z1.s
306 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
307 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
308 %b = xor <vscale x 4 x i32> %a, %x
309 ret <vscale x 4 x i32> %b
312 define <vscale x 8 x i16> @xor_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
313 ; CHECK-LABEL: xor_nxv8i16_x:
314 ; CHECK: // %bb.0: // %entry
315 ; CHECK-NEXT: ptrue p0.h
316 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
317 ; CHECK-NEXT: eor z0.h, p0/m, z0.h, z1.h
320 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
321 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
322 %b = xor <vscale x 8 x i16> %a, %x
323 ret <vscale x 8 x i16> %b
326 define <vscale x 16 x i8> @xor_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
327 ; CHECK-LABEL: xor_nxv16i8_x:
328 ; CHECK: // %bb.0: // %entry
329 ; CHECK-NEXT: ptrue p0.b
330 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
331 ; CHECK-NEXT: eor z0.b, p0/m, z0.b, z1.b
334 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
335 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
336 %b = xor <vscale x 16 x i8> %a, %x
337 ret <vscale x 16 x i8> %b
340 define <vscale x 2 x i64> @shl_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
341 ; CHECK-LABEL: shl_nxv2i64_x:
342 ; CHECK: // %bb.0: // %entry
343 ; CHECK-NEXT: ptrue p0.d
344 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
345 ; CHECK-NEXT: lslr z1.d, p0/m, z1.d, z0.d
346 ; CHECK-NEXT: mov z0.d, p1/m, z1.d
349 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
350 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
351 %b = shl <vscale x 2 x i64> %x, %a
352 ret <vscale x 2 x i64> %b
355 define <vscale x 4 x i32> @shl_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
356 ; CHECK-LABEL: shl_nxv4i32_x:
357 ; CHECK: // %bb.0: // %entry
358 ; CHECK-NEXT: ptrue p0.s
359 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
360 ; CHECK-NEXT: lslr z1.s, p0/m, z1.s, z0.s
361 ; CHECK-NEXT: mov z0.s, p1/m, z1.s
364 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
365 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
366 %b = shl <vscale x 4 x i32> %x, %a
367 ret <vscale x 4 x i32> %b
370 define <vscale x 8 x i16> @shl_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
371 ; CHECK-LABEL: shl_nxv8i16_x:
372 ; CHECK: // %bb.0: // %entry
373 ; CHECK-NEXT: ptrue p0.h
374 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
375 ; CHECK-NEXT: lslr z1.h, p0/m, z1.h, z0.h
376 ; CHECK-NEXT: mov z0.h, p1/m, z1.h
379 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
380 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
381 %b = shl <vscale x 8 x i16> %x, %a
382 ret <vscale x 8 x i16> %b
385 define <vscale x 16 x i8> @shl_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
386 ; CHECK-LABEL: shl_nxv16i8_x:
387 ; CHECK: // %bb.0: // %entry
388 ; CHECK-NEXT: ptrue p0.b
389 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
390 ; CHECK-NEXT: lslr z1.b, p0/m, z1.b, z0.b
391 ; CHECK-NEXT: mov z0.b, p1/m, z1.b
394 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
395 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
396 %b = shl <vscale x 16 x i8> %x, %a
397 ret <vscale x 16 x i8> %b
400 define <vscale x 2 x i64> @ashr_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
401 ; CHECK-LABEL: ashr_nxv2i64_x:
402 ; CHECK: // %bb.0: // %entry
403 ; CHECK-NEXT: ptrue p0.d
404 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
405 ; CHECK-NEXT: asrr z1.d, p0/m, z1.d, z0.d
406 ; CHECK-NEXT: mov z0.d, p1/m, z1.d
409 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
410 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
411 %b = ashr <vscale x 2 x i64> %x, %a
412 ret <vscale x 2 x i64> %b
415 define <vscale x 4 x i32> @ashr_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
416 ; CHECK-LABEL: ashr_nxv4i32_x:
417 ; CHECK: // %bb.0: // %entry
418 ; CHECK-NEXT: ptrue p0.s
419 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
420 ; CHECK-NEXT: asrr z1.s, p0/m, z1.s, z0.s
421 ; CHECK-NEXT: mov z0.s, p1/m, z1.s
424 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
425 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
426 %b = ashr <vscale x 4 x i32> %x, %a
427 ret <vscale x 4 x i32> %b
430 define <vscale x 8 x i16> @ashr_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
431 ; CHECK-LABEL: ashr_nxv8i16_x:
432 ; CHECK: // %bb.0: // %entry
433 ; CHECK-NEXT: ptrue p0.h
434 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
435 ; CHECK-NEXT: asrr z1.h, p0/m, z1.h, z0.h
436 ; CHECK-NEXT: mov z0.h, p1/m, z1.h
439 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
440 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
441 %b = ashr <vscale x 8 x i16> %x, %a
442 ret <vscale x 8 x i16> %b
445 define <vscale x 16 x i8> @ashr_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
446 ; CHECK-LABEL: ashr_nxv16i8_x:
447 ; CHECK: // %bb.0: // %entry
448 ; CHECK-NEXT: ptrue p0.b
449 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
450 ; CHECK-NEXT: asrr z1.b, p0/m, z1.b, z0.b
451 ; CHECK-NEXT: mov z0.b, p1/m, z1.b
454 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
455 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
456 %b = ashr <vscale x 16 x i8> %x, %a
457 ret <vscale x 16 x i8> %b
460 define <vscale x 2 x i64> @lshr_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
461 ; CHECK-LABEL: lshr_nxv2i64_x:
462 ; CHECK: // %bb.0: // %entry
463 ; CHECK-NEXT: ptrue p0.d
464 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
465 ; CHECK-NEXT: lsrr z1.d, p0/m, z1.d, z0.d
466 ; CHECK-NEXT: mov z0.d, p1/m, z1.d
469 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
470 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %y, <vscale x 2 x i64> zeroinitializer
471 %b = lshr <vscale x 2 x i64> %x, %a
472 ret <vscale x 2 x i64> %b
475 define <vscale x 4 x i32> @lshr_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
476 ; CHECK-LABEL: lshr_nxv4i32_x:
477 ; CHECK: // %bb.0: // %entry
478 ; CHECK-NEXT: ptrue p0.s
479 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
480 ; CHECK-NEXT: lsrr z1.s, p0/m, z1.s, z0.s
481 ; CHECK-NEXT: mov z0.s, p1/m, z1.s
484 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
485 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> zeroinitializer
486 %b = lshr <vscale x 4 x i32> %x, %a
487 ret <vscale x 4 x i32> %b
490 define <vscale x 8 x i16> @lshr_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
491 ; CHECK-LABEL: lshr_nxv8i16_x:
492 ; CHECK: // %bb.0: // %entry
493 ; CHECK-NEXT: ptrue p0.h
494 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
495 ; CHECK-NEXT: lsrr z1.h, p0/m, z1.h, z0.h
496 ; CHECK-NEXT: mov z0.h, p1/m, z1.h
499 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
500 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %y, <vscale x 8 x i16> zeroinitializer
501 %b = lshr <vscale x 8 x i16> %x, %a
502 ret <vscale x 8 x i16> %b
505 define <vscale x 16 x i8> @lshr_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
506 ; CHECK-LABEL: lshr_nxv16i8_x:
507 ; CHECK: // %bb.0: // %entry
508 ; CHECK-NEXT: ptrue p0.b
509 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
510 ; CHECK-NEXT: lsrr z1.b, p0/m, z1.b, z0.b
511 ; CHECK-NEXT: mov z0.b, p1/m, z1.b
514 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
515 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %y, <vscale x 16 x i8> zeroinitializer
516 %b = lshr <vscale x 16 x i8> %x, %a
517 ret <vscale x 16 x i8> %b
520 define <vscale x 2 x i64> @mla_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
521 ; CHECK-LABEL: mla_nxv2i64_x:
522 ; CHECK: // %bb.0: // %entry
523 ; CHECK-NEXT: ptrue p0.d
524 ; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
525 ; CHECK-NEXT: mla z0.d, p0/m, z1.d, z2.d
528 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
529 %m = mul <vscale x 2 x i64> %y, %z
530 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %m, <vscale x 2 x i64> zeroinitializer
531 %b = add <vscale x 2 x i64> %a, %x
532 ret <vscale x 2 x i64> %b
535 define <vscale x 4 x i32> @mla_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
536 ; CHECK-LABEL: mla_nxv4i32_x:
537 ; CHECK: // %bb.0: // %entry
538 ; CHECK-NEXT: ptrue p0.s
539 ; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
540 ; CHECK-NEXT: mla z0.s, p0/m, z1.s, z2.s
543 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
544 %m = mul <vscale x 4 x i32> %y, %z
545 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %m, <vscale x 4 x i32> zeroinitializer
546 %b = add <vscale x 4 x i32> %a, %x
547 ret <vscale x 4 x i32> %b
550 define <vscale x 8 x i16> @mla_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
551 ; CHECK-LABEL: mla_nxv8i16_x:
552 ; CHECK: // %bb.0: // %entry
553 ; CHECK-NEXT: ptrue p0.h
554 ; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
555 ; CHECK-NEXT: mla z0.h, p0/m, z1.h, z2.h
558 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
559 %m = mul <vscale x 8 x i16> %y, %z
560 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %m, <vscale x 8 x i16> zeroinitializer
561 %b = add <vscale x 8 x i16> %a, %x
562 ret <vscale x 8 x i16> %b
565 define <vscale x 16 x i8> @mla_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
566 ; CHECK-LABEL: mla_nxv16i8_x:
567 ; CHECK: // %bb.0: // %entry
568 ; CHECK-NEXT: ptrue p0.b
569 ; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
570 ; CHECK-NEXT: mla z0.b, p0/m, z1.b, z2.b
573 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
574 %m = mul <vscale x 16 x i8> %y, %z
575 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %m, <vscale x 16 x i8> zeroinitializer
576 %b = add <vscale x 16 x i8> %a, %x
577 ret <vscale x 16 x i8> %b
580 define <vscale x 2 x i64> @mls_nxv2i64_x(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
581 ; CHECK-LABEL: mls_nxv2i64_x:
582 ; CHECK: // %bb.0: // %entry
583 ; CHECK-NEXT: ptrue p0.d
584 ; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
585 ; CHECK-NEXT: msb z0.d, p0/m, z1.d, z2.d
588 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
589 %m = mul <vscale x 2 x i64> %x, %y
590 %a = sub <vscale x 2 x i64> %z, %m
591 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %x
592 ret <vscale x 2 x i64> %b
595 define <vscale x 4 x i32> @mls_nxv4i32_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
596 ; CHECK-LABEL: mls_nxv4i32_x:
597 ; CHECK: // %bb.0: // %entry
598 ; CHECK-NEXT: ptrue p0.s
599 ; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
600 ; CHECK-NEXT: msb z0.s, p0/m, z1.s, z2.s
603 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
604 %m = mul <vscale x 4 x i32> %x, %y
605 %a = sub <vscale x 4 x i32> %z, %m
606 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %x
607 ret <vscale x 4 x i32> %b
610 define <vscale x 8 x i16> @mls_nxv8i16_x(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
611 ; CHECK-LABEL: mls_nxv8i16_x:
612 ; CHECK: // %bb.0: // %entry
613 ; CHECK-NEXT: ptrue p0.h
614 ; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
615 ; CHECK-NEXT: msb z0.h, p0/m, z1.h, z2.h
618 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
619 %m = mul <vscale x 8 x i16> %x, %y
620 %a = sub <vscale x 8 x i16> %z, %m
621 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %x
622 ret <vscale x 8 x i16> %b
625 define <vscale x 16 x i8> @mls_nxv16i8_x(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
626 ; CHECK-LABEL: mls_nxv16i8_x:
627 ; CHECK: // %bb.0: // %entry
628 ; CHECK-NEXT: ptrue p0.b
629 ; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
630 ; CHECK-NEXT: msb z0.b, p0/m, z1.b, z2.b
633 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
634 %m = mul <vscale x 16 x i8> %x, %y
635 %a = sub <vscale x 16 x i8> %z, %m
636 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %x
637 ret <vscale x 16 x i8> %b
640 define <vscale x 4 x float> @fadd_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
641 ; CHECK-LABEL: fadd_nxv4f32_x:
642 ; CHECK: // %bb.0: // %entry
643 ; CHECK-NEXT: ptrue p0.s
644 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
645 ; CHECK-NEXT: not p0.b, p0/z, p1.b
646 ; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
649 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
650 %a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %y, <vscale x 4 x float> splat (float -0.000000e+00)
651 %b = fadd <vscale x 4 x float> %a, %x
652 ret <vscale x 4 x float> %b
655 define <vscale x 8 x half> @fadd_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
656 ; CHECK-LABEL: fadd_nxv8f16_x:
657 ; CHECK: // %bb.0: // %entry
658 ; CHECK-NEXT: ptrue p0.h
659 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
660 ; CHECK-NEXT: not p0.b, p0/z, p1.b
661 ; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
664 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
665 %a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %y, <vscale x 8 x half> splat (half 0xH8000)
666 %b = fadd <vscale x 8 x half> %a, %x
667 ret <vscale x 8 x half> %b
670 define <vscale x 2 x double> @fadd_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
671 ; CHECK-LABEL: fadd_nxv2f64_x:
672 ; CHECK: // %bb.0: // %entry
673 ; CHECK-NEXT: ptrue p0.d
674 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
675 ; CHECK-NEXT: not p0.b, p0/z, p1.b
676 ; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
679 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
680 %a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %y, <vscale x 2 x double> splat (double -0.000000e+00)
681 %b = fadd <vscale x 2 x double> %a, %x
682 ret <vscale x 2 x double> %b
685 define <vscale x 4 x float> @fsub_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
686 ; CHECK-LABEL: fsub_nxv4f32_x:
687 ; CHECK: // %bb.0: // %entry
688 ; CHECK-NEXT: ptrue p0.s
689 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
690 ; CHECK-NEXT: not p0.b, p0/z, p1.b
691 ; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s
694 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
695 %a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %y, <vscale x 4 x float> zeroinitializer
696 %b = fsub <vscale x 4 x float> %x, %a
697 ret <vscale x 4 x float> %b
700 define <vscale x 8 x half> @fsub_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
701 ; CHECK-LABEL: fsub_nxv8f16_x:
702 ; CHECK: // %bb.0: // %entry
703 ; CHECK-NEXT: ptrue p0.h
704 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
705 ; CHECK-NEXT: not p0.b, p0/z, p1.b
706 ; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
709 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
710 %a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %y, <vscale x 8 x half> zeroinitializer
711 %b = fsub <vscale x 8 x half> %x, %a
712 ret <vscale x 8 x half> %b
715 define <vscale x 2 x double> @fsub_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
716 ; CHECK-LABEL: fsub_nxv2f64_x:
717 ; CHECK: // %bb.0: // %entry
718 ; CHECK-NEXT: ptrue p0.d
719 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
720 ; CHECK-NEXT: not p0.b, p0/z, p1.b
721 ; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d
724 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
725 %a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %y, <vscale x 2 x double> zeroinitializer
726 %b = fsub <vscale x 2 x double> %x, %a
727 ret <vscale x 2 x double> %b
730 define <vscale x 4 x float> @fmul_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
731 ; CHECK-LABEL: fmul_nxv4f32_x:
732 ; CHECK: // %bb.0: // %entry
733 ; CHECK-NEXT: ptrue p0.s
734 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
735 ; CHECK-NEXT: not p0.b, p0/z, p1.b
736 ; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
739 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
740 %a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %y, <vscale x 4 x float> splat (float 1.000000e+00)
741 %b = fmul <vscale x 4 x float> %a, %x
742 ret <vscale x 4 x float> %b
745 define <vscale x 8 x half> @fmul_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
746 ; CHECK-LABEL: fmul_nxv8f16_x:
747 ; CHECK: // %bb.0: // %entry
748 ; CHECK-NEXT: ptrue p0.h
749 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
750 ; CHECK-NEXT: not p0.b, p0/z, p1.b
751 ; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
754 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
755 %a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %y, <vscale x 8 x half> splat (half 0xH3C00)
756 %b = fmul <vscale x 8 x half> %a, %x
757 ret <vscale x 8 x half> %b
760 define <vscale x 2 x double> @fmul_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
761 ; CHECK-LABEL: fmul_nxv2f64_x:
762 ; CHECK: // %bb.0: // %entry
763 ; CHECK-NEXT: ptrue p0.d
764 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
765 ; CHECK-NEXT: not p0.b, p0/z, p1.b
766 ; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
769 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
770 %a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %y, <vscale x 2 x double> splat (double 1.000000e+00)
771 %b = fmul <vscale x 2 x double> %a, %x
772 ret <vscale x 2 x double> %b
775 define <vscale x 4 x float> @fdiv_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
776 ; CHECK-LABEL: fdiv_nxv4f32_x:
777 ; CHECK: // %bb.0: // %entry
778 ; CHECK-NEXT: ptrue p0.s
779 ; CHECK-NEXT: fdivr z1.s, p0/m, z1.s, z0.s
780 ; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
781 ; CHECK-NEXT: not p0.b, p0/z, p1.b
782 ; CHECK-NEXT: mov z0.s, p0/m, z1.s
785 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
786 %a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %y, <vscale x 4 x float> splat (float 1.000000e+00)
787 %b = fdiv <vscale x 4 x float> %x, %a
788 ret <vscale x 4 x float> %b
791 define <vscale x 8 x half> @fdiv_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
792 ; CHECK-LABEL: fdiv_nxv8f16_x:
793 ; CHECK: // %bb.0: // %entry
794 ; CHECK-NEXT: ptrue p0.h
795 ; CHECK-NEXT: fdivr z1.h, p0/m, z1.h, z0.h
796 ; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
797 ; CHECK-NEXT: not p0.b, p0/z, p1.b
798 ; CHECK-NEXT: mov z0.h, p0/m, z1.h
801 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
802 %a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %y, <vscale x 8 x half> splat (half 0xH3C00)
803 %b = fdiv <vscale x 8 x half> %x, %a
804 ret <vscale x 8 x half> %b
807 define <vscale x 2 x double> @fdiv_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
808 ; CHECK-LABEL: fdiv_nxv2f64_x:
809 ; CHECK: // %bb.0: // %entry
810 ; CHECK-NEXT: ptrue p0.d
811 ; CHECK-NEXT: fdivr z1.d, p0/m, z1.d, z0.d
812 ; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
813 ; CHECK-NEXT: not p0.b, p0/z, p1.b
814 ; CHECK-NEXT: mov z0.d, p0/m, z1.d
817 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
818 %a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %y, <vscale x 2 x double> splat (double 1.000000e+00)
819 %b = fdiv <vscale x 2 x double> %x, %a
820 ret <vscale x 2 x double> %b
823 define <vscale x 4 x float> @fma_nxv4f32_x(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
824 ; CHECK-LABEL: fma_nxv4f32_x:
825 ; CHECK: // %bb.0: // %entry
826 ; CHECK-NEXT: ptrue p0.s
827 ; CHECK-NEXT: fcmle p1.s, p0/z, z3.s, #0.0
828 ; CHECK-NEXT: not p0.b, p0/z, p1.b
829 ; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
832 %c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
833 %m = fmul fast <vscale x 4 x float> %y, %z
834 %a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %m, <vscale x 4 x float> splat (float -0.000000e+00)
835 %b = fadd fast <vscale x 4 x float> %a, %x
836 ret <vscale x 4 x float> %b
839 define <vscale x 8 x half> @fma_nxv8f16_x(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
840 ; CHECK-LABEL: fma_nxv8f16_x:
841 ; CHECK: // %bb.0: // %entry
842 ; CHECK-NEXT: ptrue p0.h
843 ; CHECK-NEXT: fcmle p1.h, p0/z, z3.h, #0.0
844 ; CHECK-NEXT: not p0.b, p0/z, p1.b
845 ; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
848 %c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
849 %m = fmul fast <vscale x 8 x half> %y, %z
850 %a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %m, <vscale x 8 x half> splat (half 0xH8000)
851 %b = fadd fast <vscale x 8 x half> %a, %x
852 ret <vscale x 8 x half> %b
855 define <vscale x 2 x double> @fma_nxv2f64_x(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
856 ; CHECK-LABEL: fma_nxv2f64_x:
857 ; CHECK: // %bb.0: // %entry
858 ; CHECK-NEXT: ptrue p0.d
859 ; CHECK-NEXT: fcmle p1.d, p0/z, z3.d, #0.0
860 ; CHECK-NEXT: not p0.b, p0/z, p1.b
861 ; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
864 %c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
865 %m = fmul fast <vscale x 2 x double> %y, %z
866 %a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %m, <vscale x 2 x double> splat (double -0.000000e+00)
867 %b = fadd fast <vscale x 2 x double> %a, %x
868 ret <vscale x 2 x double> %b
871 define <vscale x 2 x i64> @add_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
872 ; CHECK-LABEL: add_nxv2i64_y:
873 ; CHECK: // %bb.0: // %entry
874 ; CHECK-NEXT: ptrue p0.d
875 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
876 ; CHECK-NEXT: add z1.d, p0/m, z1.d, z0.d
877 ; CHECK-NEXT: mov z0.d, z1.d
880 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
881 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %x, <vscale x 2 x i64> zeroinitializer
882 %b = add <vscale x 2 x i64> %a, %y
883 ret <vscale x 2 x i64> %b
886 define <vscale x 4 x i32> @add_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
887 ; CHECK-LABEL: add_nxv4i32_y:
888 ; CHECK: // %bb.0: // %entry
889 ; CHECK-NEXT: ptrue p0.s
890 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
891 ; CHECK-NEXT: add z1.s, p0/m, z1.s, z0.s
892 ; CHECK-NEXT: mov z0.d, z1.d
895 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
896 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> zeroinitializer
897 %b = add <vscale x 4 x i32> %a, %y
898 ret <vscale x 4 x i32> %b
901 define <vscale x 8 x i16> @add_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
902 ; CHECK-LABEL: add_nxv8i16_y:
903 ; CHECK: // %bb.0: // %entry
904 ; CHECK-NEXT: ptrue p0.h
905 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
906 ; CHECK-NEXT: add z1.h, p0/m, z1.h, z0.h
907 ; CHECK-NEXT: mov z0.d, z1.d
910 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
911 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %x, <vscale x 8 x i16> zeroinitializer
912 %b = add <vscale x 8 x i16> %a, %y
913 ret <vscale x 8 x i16> %b
916 define <vscale x 16 x i8> @add_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
917 ; CHECK-LABEL: add_nxv16i8_y:
918 ; CHECK: // %bb.0: // %entry
919 ; CHECK-NEXT: ptrue p0.b
920 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
921 ; CHECK-NEXT: add z1.b, p0/m, z1.b, z0.b
922 ; CHECK-NEXT: mov z0.d, z1.d
925 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
926 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %x, <vscale x 16 x i8> zeroinitializer
927 %b = add <vscale x 16 x i8> %a, %y
928 ret <vscale x 16 x i8> %b
931 define <vscale x 2 x i64> @sub_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
932 ; CHECK-LABEL: sub_nxv2i64_y:
933 ; CHECK: // %bb.0: // %entry
934 ; CHECK-NEXT: ptrue p0.d
935 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
936 ; CHECK-NEXT: subr z1.d, p0/m, z1.d, z0.d
937 ; CHECK-NEXT: mov z0.d, z1.d
940 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
941 %a = sub <vscale x 2 x i64> %x, %y
942 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
943 ret <vscale x 2 x i64> %b
946 define <vscale x 4 x i32> @sub_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
947 ; CHECK-LABEL: sub_nxv4i32_y:
948 ; CHECK: // %bb.0: // %entry
949 ; CHECK-NEXT: ptrue p0.s
950 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
951 ; CHECK-NEXT: subr z1.s, p0/m, z1.s, z0.s
952 ; CHECK-NEXT: mov z0.d, z1.d
955 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
956 %a = sub <vscale x 4 x i32> %x, %y
957 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
958 ret <vscale x 4 x i32> %b
961 define <vscale x 8 x i16> @sub_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
962 ; CHECK-LABEL: sub_nxv8i16_y:
963 ; CHECK: // %bb.0: // %entry
964 ; CHECK-NEXT: ptrue p0.h
965 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
966 ; CHECK-NEXT: subr z1.h, p0/m, z1.h, z0.h
967 ; CHECK-NEXT: mov z0.d, z1.d
970 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
971 %a = sub <vscale x 8 x i16> %x, %y
972 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
973 ret <vscale x 8 x i16> %b
976 define <vscale x 16 x i8> @sub_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
977 ; CHECK-LABEL: sub_nxv16i8_y:
978 ; CHECK: // %bb.0: // %entry
979 ; CHECK-NEXT: ptrue p0.b
980 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
981 ; CHECK-NEXT: subr z1.b, p0/m, z1.b, z0.b
982 ; CHECK-NEXT: mov z0.d, z1.d
985 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
986 %a = sub <vscale x 16 x i8> %x, %y
987 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
988 ret <vscale x 16 x i8> %b
991 define <vscale x 2 x i64> @mul_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
992 ; CHECK-LABEL: mul_nxv2i64_y:
993 ; CHECK: // %bb.0: // %entry
994 ; CHECK-NEXT: ptrue p0.d
995 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
996 ; CHECK-NEXT: mul z1.d, p0/m, z1.d, z0.d
997 ; CHECK-NEXT: mov z0.d, z1.d
1000 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1001 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %x, <vscale x 2 x i64> splat (i64 1)
1002 %b = mul <vscale x 2 x i64> %a, %y
1003 ret <vscale x 2 x i64> %b
1006 define <vscale x 4 x i32> @mul_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1007 ; CHECK-LABEL: mul_nxv4i32_y:
1008 ; CHECK: // %bb.0: // %entry
1009 ; CHECK-NEXT: ptrue p0.s
1010 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1011 ; CHECK-NEXT: mul z1.s, p0/m, z1.s, z0.s
1012 ; CHECK-NEXT: mov z0.d, z1.d
1015 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1016 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> splat (i32 1)
1017 %b = mul <vscale x 4 x i32> %a, %y
1018 ret <vscale x 4 x i32> %b
1021 define <vscale x 8 x i16> @mul_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1022 ; CHECK-LABEL: mul_nxv8i16_y:
1023 ; CHECK: // %bb.0: // %entry
1024 ; CHECK-NEXT: ptrue p0.h
1025 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1026 ; CHECK-NEXT: mul z1.h, p0/m, z1.h, z0.h
1027 ; CHECK-NEXT: mov z0.d, z1.d
1030 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1031 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %x, <vscale x 8 x i16> splat (i16 1)
1032 %b = mul <vscale x 8 x i16> %a, %y
1033 ret <vscale x 8 x i16> %b
1036 define <vscale x 16 x i8> @mul_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1037 ; CHECK-LABEL: mul_nxv16i8_y:
1038 ; CHECK: // %bb.0: // %entry
1039 ; CHECK-NEXT: ptrue p0.b
1040 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1041 ; CHECK-NEXT: mul z1.b, p0/m, z1.b, z0.b
1042 ; CHECK-NEXT: mov z0.d, z1.d
1045 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1046 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %x, <vscale x 16 x i8> splat (i8 1)
1047 %b = mul <vscale x 16 x i8> %a, %y
1048 ret <vscale x 16 x i8> %b
1051 define <vscale x 2 x i64> @and_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1052 ; CHECK-LABEL: and_nxv2i64_y:
1053 ; CHECK: // %bb.0: // %entry
1054 ; CHECK-NEXT: ptrue p0.d
1055 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1056 ; CHECK-NEXT: and z1.d, p0/m, z1.d, z0.d
1057 ; CHECK-NEXT: mov z0.d, z1.d
1060 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1061 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %x, <vscale x 2 x i64> splat (i64 -1)
1062 %b = and <vscale x 2 x i64> %a, %y
1063 ret <vscale x 2 x i64> %b
1066 define <vscale x 4 x i32> @and_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1067 ; CHECK-LABEL: and_nxv4i32_y:
1068 ; CHECK: // %bb.0: // %entry
1069 ; CHECK-NEXT: ptrue p0.s
1070 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1071 ; CHECK-NEXT: and z1.s, p0/m, z1.s, z0.s
1072 ; CHECK-NEXT: mov z0.d, z1.d
1075 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1076 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> splat (i32 -1)
1077 %b = and <vscale x 4 x i32> %a, %y
1078 ret <vscale x 4 x i32> %b
1081 define <vscale x 8 x i16> @and_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1082 ; CHECK-LABEL: and_nxv8i16_y:
1083 ; CHECK: // %bb.0: // %entry
1084 ; CHECK-NEXT: ptrue p0.h
1085 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1086 ; CHECK-NEXT: and z1.h, p0/m, z1.h, z0.h
1087 ; CHECK-NEXT: mov z0.d, z1.d
1090 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1091 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %x, <vscale x 8 x i16> splat (i16 -1)
1092 %b = and <vscale x 8 x i16> %a, %y
1093 ret <vscale x 8 x i16> %b
1096 define <vscale x 16 x i8> @and_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1097 ; CHECK-LABEL: and_nxv16i8_y:
1098 ; CHECK: // %bb.0: // %entry
1099 ; CHECK-NEXT: ptrue p0.b
1100 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1101 ; CHECK-NEXT: and z1.b, p0/m, z1.b, z0.b
1102 ; CHECK-NEXT: mov z0.d, z1.d
1105 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1106 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %x, <vscale x 16 x i8> splat (i8 -1)
1107 %b = and <vscale x 16 x i8> %a, %y
1108 ret <vscale x 16 x i8> %b
1111 define <vscale x 2 x i64> @or_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1112 ; CHECK-LABEL: or_nxv2i64_y:
1113 ; CHECK: // %bb.0: // %entry
1114 ; CHECK-NEXT: ptrue p0.d
1115 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1116 ; CHECK-NEXT: orr z1.d, p0/m, z1.d, z0.d
1117 ; CHECK-NEXT: mov z0.d, z1.d
1120 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1121 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %x, <vscale x 2 x i64> zeroinitializer
1122 %b = or <vscale x 2 x i64> %a, %y
1123 ret <vscale x 2 x i64> %b
1126 define <vscale x 4 x i32> @or_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1127 ; CHECK-LABEL: or_nxv4i32_y:
1128 ; CHECK: // %bb.0: // %entry
1129 ; CHECK-NEXT: ptrue p0.s
1130 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1131 ; CHECK-NEXT: orr z1.s, p0/m, z1.s, z0.s
1132 ; CHECK-NEXT: mov z0.d, z1.d
1135 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1136 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> zeroinitializer
1137 %b = or <vscale x 4 x i32> %a, %y
1138 ret <vscale x 4 x i32> %b
1141 define <vscale x 8 x i16> @or_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1142 ; CHECK-LABEL: or_nxv8i16_y:
1143 ; CHECK: // %bb.0: // %entry
1144 ; CHECK-NEXT: ptrue p0.h
1145 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1146 ; CHECK-NEXT: orr z1.h, p0/m, z1.h, z0.h
1147 ; CHECK-NEXT: mov z0.d, z1.d
1150 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1151 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %x, <vscale x 8 x i16> zeroinitializer
1152 %b = or <vscale x 8 x i16> %a, %y
1153 ret <vscale x 8 x i16> %b
1156 define <vscale x 16 x i8> @or_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1157 ; CHECK-LABEL: or_nxv16i8_y:
1158 ; CHECK: // %bb.0: // %entry
1159 ; CHECK-NEXT: ptrue p0.b
1160 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1161 ; CHECK-NEXT: orr z1.b, p0/m, z1.b, z0.b
1162 ; CHECK-NEXT: mov z0.d, z1.d
1165 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1166 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %x, <vscale x 16 x i8> zeroinitializer
1167 %b = or <vscale x 16 x i8> %a, %y
1168 ret <vscale x 16 x i8> %b
1171 define <vscale x 2 x i64> @xor_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1172 ; CHECK-LABEL: xor_nxv2i64_y:
1173 ; CHECK: // %bb.0: // %entry
1174 ; CHECK-NEXT: ptrue p0.d
1175 ; CHECK-NEXT: cmpgt p0.d, p0/z, z2.d, #0
1176 ; CHECK-NEXT: eor z1.d, p0/m, z1.d, z0.d
1177 ; CHECK-NEXT: mov z0.d, z1.d
1180 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1181 %a = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %x, <vscale x 2 x i64> zeroinitializer
1182 %b = xor <vscale x 2 x i64> %a, %y
1183 ret <vscale x 2 x i64> %b
1186 define <vscale x 4 x i32> @xor_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1187 ; CHECK-LABEL: xor_nxv4i32_y:
1188 ; CHECK: // %bb.0: // %entry
1189 ; CHECK-NEXT: ptrue p0.s
1190 ; CHECK-NEXT: cmpgt p0.s, p0/z, z2.s, #0
1191 ; CHECK-NEXT: eor z1.s, p0/m, z1.s, z0.s
1192 ; CHECK-NEXT: mov z0.d, z1.d
1195 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1196 %a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> zeroinitializer
1197 %b = xor <vscale x 4 x i32> %a, %y
1198 ret <vscale x 4 x i32> %b
1201 define <vscale x 8 x i16> @xor_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1202 ; CHECK-LABEL: xor_nxv8i16_y:
1203 ; CHECK: // %bb.0: // %entry
1204 ; CHECK-NEXT: ptrue p0.h
1205 ; CHECK-NEXT: cmpgt p0.h, p0/z, z2.h, #0
1206 ; CHECK-NEXT: eor z1.h, p0/m, z1.h, z0.h
1207 ; CHECK-NEXT: mov z0.d, z1.d
1210 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1211 %a = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %x, <vscale x 8 x i16> zeroinitializer
1212 %b = xor <vscale x 8 x i16> %a, %y
1213 ret <vscale x 8 x i16> %b
1216 define <vscale x 16 x i8> @xor_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1217 ; CHECK-LABEL: xor_nxv16i8_y:
1218 ; CHECK: // %bb.0: // %entry
1219 ; CHECK-NEXT: ptrue p0.b
1220 ; CHECK-NEXT: cmpgt p0.b, p0/z, z2.b, #0
1221 ; CHECK-NEXT: eor z1.b, p0/m, z1.b, z0.b
1222 ; CHECK-NEXT: mov z0.d, z1.d
1225 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1226 %a = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %x, <vscale x 16 x i8> zeroinitializer
1227 %b = xor <vscale x 16 x i8> %a, %y
1228 ret <vscale x 16 x i8> %b
1231 define <vscale x 2 x i64> @shl_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1232 ; CHECK-LABEL: shl_nxv2i64_y:
1233 ; CHECK: // %bb.0: // %entry
1234 ; CHECK-NEXT: ptrue p0.d
1235 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
1236 ; CHECK-NEXT: lsl z0.d, p0/m, z0.d, z1.d
1237 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
1240 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1241 %a = shl <vscale x 2 x i64> %x, %y
1242 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1243 ret <vscale x 2 x i64> %b
1246 define <vscale x 4 x i32> @shl_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1247 ; CHECK-LABEL: shl_nxv4i32_y:
1248 ; CHECK: // %bb.0: // %entry
1249 ; CHECK-NEXT: ptrue p0.s
1250 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
1251 ; CHECK-NEXT: lsl z0.s, p0/m, z0.s, z1.s
1252 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
1255 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1256 %a = shl <vscale x 4 x i32> %x, %y
1257 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1258 ret <vscale x 4 x i32> %b
1261 define <vscale x 8 x i16> @shl_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1262 ; CHECK-LABEL: shl_nxv8i16_y:
1263 ; CHECK: // %bb.0: // %entry
1264 ; CHECK-NEXT: ptrue p0.h
1265 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
1266 ; CHECK-NEXT: lsl z0.h, p0/m, z0.h, z1.h
1267 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
1270 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1271 %a = shl <vscale x 8 x i16> %x, %y
1272 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1273 ret <vscale x 8 x i16> %b
1276 define <vscale x 16 x i8> @shl_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1277 ; CHECK-LABEL: shl_nxv16i8_y:
1278 ; CHECK: // %bb.0: // %entry
1279 ; CHECK-NEXT: ptrue p0.b
1280 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
1281 ; CHECK-NEXT: lsl z0.b, p0/m, z0.b, z1.b
1282 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
1285 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1286 %a = shl <vscale x 16 x i8> %x, %y
1287 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1288 ret <vscale x 16 x i8> %b
1291 define <vscale x 2 x i64> @ashr_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1292 ; CHECK-LABEL: ashr_nxv2i64_y:
1293 ; CHECK: // %bb.0: // %entry
1294 ; CHECK-NEXT: ptrue p0.d
1295 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
1296 ; CHECK-NEXT: asr z0.d, p0/m, z0.d, z1.d
1297 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
1300 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1301 %a = ashr <vscale x 2 x i64> %x, %y
1302 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1303 ret <vscale x 2 x i64> %b
1306 define <vscale x 4 x i32> @ashr_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1307 ; CHECK-LABEL: ashr_nxv4i32_y:
1308 ; CHECK: // %bb.0: // %entry
1309 ; CHECK-NEXT: ptrue p0.s
1310 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
1311 ; CHECK-NEXT: asr z0.s, p0/m, z0.s, z1.s
1312 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
1315 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1316 %a = ashr <vscale x 4 x i32> %x, %y
1317 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1318 ret <vscale x 4 x i32> %b
1321 define <vscale x 8 x i16> @ashr_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1322 ; CHECK-LABEL: ashr_nxv8i16_y:
1323 ; CHECK: // %bb.0: // %entry
1324 ; CHECK-NEXT: ptrue p0.h
1325 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
1326 ; CHECK-NEXT: asr z0.h, p0/m, z0.h, z1.h
1327 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
1330 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1331 %a = ashr <vscale x 8 x i16> %x, %y
1332 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1333 ret <vscale x 8 x i16> %b
1336 define <vscale x 16 x i8> @ashr_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1337 ; CHECK-LABEL: ashr_nxv16i8_y:
1338 ; CHECK: // %bb.0: // %entry
1339 ; CHECK-NEXT: ptrue p0.b
1340 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
1341 ; CHECK-NEXT: asr z0.b, p0/m, z0.b, z1.b
1342 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
1345 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1346 %a = ashr <vscale x 16 x i8> %x, %y
1347 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1348 ret <vscale x 16 x i8> %b
1351 define <vscale x 2 x i64> @lshr_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %n) {
1352 ; CHECK-LABEL: lshr_nxv2i64_y:
1353 ; CHECK: // %bb.0: // %entry
1354 ; CHECK-NEXT: ptrue p0.d
1355 ; CHECK-NEXT: cmpgt p1.d, p0/z, z2.d, #0
1356 ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z1.d
1357 ; CHECK-NEXT: sel z0.d, p1, z0.d, z1.d
1360 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1361 %a = lshr <vscale x 2 x i64> %x, %y
1362 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1363 ret <vscale x 2 x i64> %b
1366 define <vscale x 4 x i32> @lshr_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n) {
1367 ; CHECK-LABEL: lshr_nxv4i32_y:
1368 ; CHECK: // %bb.0: // %entry
1369 ; CHECK-NEXT: ptrue p0.s
1370 ; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
1371 ; CHECK-NEXT: lsr z0.s, p0/m, z0.s, z1.s
1372 ; CHECK-NEXT: sel z0.s, p1, z0.s, z1.s
1375 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1376 %a = lshr <vscale x 4 x i32> %x, %y
1377 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1378 ret <vscale x 4 x i32> %b
1381 define <vscale x 8 x i16> @lshr_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %n) {
1382 ; CHECK-LABEL: lshr_nxv8i16_y:
1383 ; CHECK: // %bb.0: // %entry
1384 ; CHECK-NEXT: ptrue p0.h
1385 ; CHECK-NEXT: cmpgt p1.h, p0/z, z2.h, #0
1386 ; CHECK-NEXT: lsr z0.h, p0/m, z0.h, z1.h
1387 ; CHECK-NEXT: sel z0.h, p1, z0.h, z1.h
1390 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1391 %a = lshr <vscale x 8 x i16> %x, %y
1392 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1393 ret <vscale x 8 x i16> %b
1396 define <vscale x 16 x i8> @lshr_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %n) {
1397 ; CHECK-LABEL: lshr_nxv16i8_y:
1398 ; CHECK: // %bb.0: // %entry
1399 ; CHECK-NEXT: ptrue p0.b
1400 ; CHECK-NEXT: cmpgt p1.b, p0/z, z2.b, #0
1401 ; CHECK-NEXT: lsr z0.b, p0/m, z0.b, z1.b
1402 ; CHECK-NEXT: sel z0.b, p1, z0.b, z1.b
1405 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1406 %a = lshr <vscale x 16 x i8> %x, %y
1407 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1408 ret <vscale x 16 x i8> %b
1411 define <vscale x 2 x i64> @mla_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
1412 ; CHECK-LABEL: mla_nxv2i64_y:
1413 ; CHECK: // %bb.0: // %entry
1414 ; CHECK-NEXT: ptrue p0.d
1415 ; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
1416 ; CHECK-NEXT: mad z1.d, p0/m, z2.d, z0.d
1417 ; CHECK-NEXT: mov z0.d, z1.d
1420 %c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
1421 %m = mul <vscale x 2 x i64> %y, %z
1422 %a = add <vscale x 2 x i64> %m, %x
1423 %b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
1424 ret <vscale x 2 x i64> %b
1427 define <vscale x 4 x i32> @mla_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
1428 ; CHECK-LABEL: mla_nxv4i32_y:
1429 ; CHECK: // %bb.0: // %entry
1430 ; CHECK-NEXT: ptrue p0.s
1431 ; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
1432 ; CHECK-NEXT: mad z1.s, p0/m, z2.s, z0.s
1433 ; CHECK-NEXT: mov z0.d, z1.d
1436 %c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
1437 %m = mul <vscale x 4 x i32> %y, %z
1438 %a = add <vscale x 4 x i32> %m, %x
1439 %b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
1440 ret <vscale x 4 x i32> %b
1443 define <vscale x 8 x i16> @mla_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
1444 ; CHECK-LABEL: mla_nxv8i16_y:
1445 ; CHECK: // %bb.0: // %entry
1446 ; CHECK-NEXT: ptrue p0.h
1447 ; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
1448 ; CHECK-NEXT: mad z1.h, p0/m, z2.h, z0.h
1449 ; CHECK-NEXT: mov z0.d, z1.d
1452 %c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
1453 %m = mul <vscale x 8 x i16> %y, %z
1454 %a = add <vscale x 8 x i16> %m, %x
1455 %b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
1456 ret <vscale x 8 x i16> %b
1459 define <vscale x 16 x i8> @mla_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
1460 ; CHECK-LABEL: mla_nxv16i8_y:
1461 ; CHECK: // %bb.0: // %entry
1462 ; CHECK-NEXT: ptrue p0.b
1463 ; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
1464 ; CHECK-NEXT: mad z1.b, p0/m, z2.b, z0.b
1465 ; CHECK-NEXT: mov z0.d, z1.d
1468 %c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
1469 %m = mul <vscale x 16 x i8> %y, %z
1470 %a = add <vscale x 16 x i8> %m, %x
1471 %b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
1472 ret <vscale x 16 x i8> %b
define <vscale x 2 x i64> @mls_nxv2i64_y(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %z, <vscale x 2 x i64> %n) {
; CHECK-LABEL: mls_nxv2i64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: cmpgt p0.d, p0/z, z3.d, #0
; CHECK-NEXT: msb z1.d, p0/m, z0.d, z2.d
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 2 x i64> %n, zeroinitializer
%m = mul <vscale x 2 x i64> %x, %y
%a = sub <vscale x 2 x i64> %z, %m
%b = select <vscale x 2 x i1> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %y
ret <vscale x 2 x i64> %b
}

define <vscale x 4 x i32> @mls_nxv4i32_y(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %z, <vscale x 4 x i32> %n) {
; CHECK-LABEL: mls_nxv4i32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: cmpgt p0.s, p0/z, z3.s, #0
; CHECK-NEXT: msb z1.s, p0/m, z0.s, z2.s
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
%m = mul <vscale x 4 x i32> %x, %y
%a = sub <vscale x 4 x i32> %z, %m
%b = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %y
ret <vscale x 4 x i32> %b
}

define <vscale x 8 x i16> @mls_nxv8i16_y(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y, <vscale x 8 x i16> %z, <vscale x 8 x i16> %n) {
; CHECK-LABEL: mls_nxv8i16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: cmpgt p0.h, p0/z, z3.h, #0
; CHECK-NEXT: msb z1.h, p0/m, z0.h, z2.h
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 8 x i16> %n, zeroinitializer
%m = mul <vscale x 8 x i16> %x, %y
%a = sub <vscale x 8 x i16> %z, %m
%b = select <vscale x 8 x i1> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %y
ret <vscale x 8 x i16> %b
}

define <vscale x 16 x i8> @mls_nxv16i8_y(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z, <vscale x 16 x i8> %n) {
; CHECK-LABEL: mls_nxv16i8_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: cmpgt p0.b, p0/z, z3.b, #0
; CHECK-NEXT: msb z1.b, p0/m, z0.b, z2.b
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 16 x i8> %n, zeroinitializer
%m = mul <vscale x 16 x i8> %x, %y
%a = sub <vscale x 16 x i8> %z, %m
%b = select <vscale x 16 x i1> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %y
ret <vscale x 16 x i8> %b
}

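; Floating-point add: inactive lanes of %x are replaced with -0.0 (the fadd identity), so the expected code is a merging fadd into %y. The ugt compare is materialized as fcmle plus a predicate not.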
define <vscale x 4 x float> @fadd_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fadd_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %x, <vscale x 4 x float> splat (float -0.000000e+00)
%b = fadd <vscale x 4 x float> %a, %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fadd_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fadd_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %x, <vscale x 8 x half> splat (half 0xH8000)
%b = fadd <vscale x 8 x half> %a, %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fadd_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fadd_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %x, <vscale x 2 x double> splat (double -0.000000e+00)
%b = fadd <vscale x 2 x double> %a, %y
ret <vscale x 2 x double> %b
}

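; fsub of %x - %y selected against %y, expected to use the reversed form fsubr merging into %y.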
define <vscale x 4 x float> @fsub_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fsub_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fsubr z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%a = fsub <vscale x 4 x float> %x, %y
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fsub_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fsub_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fsubr z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%a = fsub <vscale x 8 x half> %x, %y
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fsub_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fsub_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fsubr z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%a = fsub <vscale x 2 x double> %x, %y
%b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
ret <vscale x 2 x double> %b
}

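; fmul: inactive lanes of %x are replaced with 1.0 (the fmul identity), giving a merging fmul into %y.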
define <vscale x 4 x float> @fmul_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fmul_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z0.s
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%a = select <vscale x 4 x i1> %c, <vscale x 4 x float> %x, <vscale x 4 x float> splat (float 1.000000e+00)
%b = fmul <vscale x 4 x float> %a, %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fmul_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fmul_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z0.h
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%a = select <vscale x 8 x i1> %c, <vscale x 8 x half> %x, <vscale x 8 x half> splat (half 0xH3C00)
%b = fmul <vscale x 8 x half> %a, %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fmul_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fmul_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: fmul z1.d, p0/m, z1.d, z0.d
; CHECK-NEXT: mov z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%a = select <vscale x 2 x i1> %c, <vscale x 2 x double> %x, <vscale x 2 x double> splat (double 1.000000e+00)
%b = fmul <vscale x 2 x double> %a, %y
ret <vscale x 2 x double> %b
}

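; fdiv: the division is done under an all-true predicate and the result is then chosen against %y with sel rather than being merged into the fdiv.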
define <vscale x 4 x float> @fdiv_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %n) {
; CHECK-LABEL: fdiv_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: fcmle p1.s, p0/z, z2.s, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%a = fdiv <vscale x 4 x float> %x, %y
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fdiv_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %n) {
; CHECK-LABEL: fdiv_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: fcmle p1.h, p0/z, z2.h, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%a = fdiv <vscale x 8 x half> %x, %y
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fdiv_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %n) {
; CHECK-LABEL: fdiv_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: fcmle p1.d, p0/z, z2.d, #0.0
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%a = fdiv <vscale x 2 x double> %x, %y
%b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
ret <vscale x 2 x double> %b
}

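; llvm.fma intrinsic computing %y*%z + %x: expected to become an all-true-predicated fmla followed by a sel against %y.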
define <vscale x 4 x float> @fmai_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
; CHECK-LABEL: fmai_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fcmle p1.s, p0/z, z3.s, #0.0
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%a = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %x)
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fmai_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
; CHECK-LABEL: fmai_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fcmle p1.h, p0/z, z3.h, #0.0
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%a = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %x)
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fmai_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
; CHECK-LABEL: fmai_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fcmle p1.d, p0/z, z3.d, #0.0
; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%a = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %x)
%b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
ret <vscale x 2 x double> %b
}

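; Same pattern built from fast-math fmul+fadd, expected to contract to the same fmla+sel sequence as the intrinsic version above.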
define <vscale x 4 x float> @fma_nxv4f32_y(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z, <vscale x 4 x float> %n) {
; CHECK-LABEL: fma_nxv4f32_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: fcmle p1.s, p0/z, z3.s, #0.0
; CHECK-NEXT: fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.s, p0, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 4 x float> %n, zeroinitializer
%m = fmul fast <vscale x 4 x float> %y, %z
%a = fadd fast <vscale x 4 x float> %m, %x
%b = select <vscale x 4 x i1> %c, <vscale x 4 x float> %a, <vscale x 4 x float> %y
ret <vscale x 4 x float> %b
}

define <vscale x 8 x half> @fma_nxv8f16_y(<vscale x 8 x half> %x, <vscale x 8 x half> %y, <vscale x 8 x half> %z, <vscale x 8 x half> %n) {
; CHECK-LABEL: fma_nxv8f16_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: fcmle p1.h, p0/z, z3.h, #0.0
; CHECK-NEXT: fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.h, p0, z0.h, z1.h
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 8 x half> %n, zeroinitializer
%m = fmul fast <vscale x 8 x half> %y, %z
%a = fadd fast <vscale x 8 x half> %m, %x
%b = select <vscale x 8 x i1> %c, <vscale x 8 x half> %a, <vscale x 8 x half> %y
ret <vscale x 8 x half> %b
}

define <vscale x 2 x double> @fma_nxv2f64_y(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z, <vscale x 2 x double> %n) {
; CHECK-LABEL: fma_nxv2f64_y:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fcmle p1.d, p0/z, z3.d, #0.0
; CHECK-NEXT: fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: not p0.b, p0/z, p1.b
; CHECK-NEXT: sel z0.d, p0, z0.d, z1.d
; CHECK-NEXT: ret
entry:
%c = fcmp ugt <vscale x 2 x double> %n, zeroinitializer
%m = fmul fast <vscale x 2 x double> %y, %z
%a = fadd fast <vscale x 2 x double> %m, %x
%b = select <vscale x 2 x i1> %c, <vscale x 2 x double> %a, <vscale x 2 x double> %y
ret <vscale x 2 x double> %b
}

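; Multi-use case: the select is also stored, so it cannot be folded away; it is materialized with sel against a splat of 1 and the mul stays unpredicated.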
define <vscale x 4 x i32> @mul_nxv4i32_multiuse_x(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y, <vscale x 4 x i32> %n, ptr %p) {
; CHECK-LABEL: mul_nxv4i32_multiuse_x:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: cmpgt p1.s, p0/z, z2.s, #0
; CHECK-NEXT: mov z2.s, #1 // =0x1
; CHECK-NEXT: sel z1.s, p1, z1.s, z2.s
; CHECK-NEXT: mul z0.s, z1.s, z0.s
; CHECK-NEXT: st1w { z1.s }, p0, [x0]
; CHECK-NEXT: ret
entry:
%c = icmp sgt <vscale x 4 x i32> %n, zeroinitializer
%a = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %y, <vscale x 4 x i32> splat (i32 1)
store <vscale x 4 x i32> %a, ptr %p
%b = mul <vscale x 4 x i32> %a, %x
ret <vscale x 4 x i32> %b
}

declare <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)