; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
define <vscale x 8 x half> @fadd_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fadd <vscale x 8 x half> %a, %b
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fadd_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fadd_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fadd <vscale x 4 x half> %a, %b
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fadd_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fadd_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fadd <vscale x 2 x half> %a, %b
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fadd_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fadd <vscale x 4 x float> %a, %b
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fadd_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fadd_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fadd <vscale x 2 x float> %a, %b
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fadd_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = fadd <vscale x 2 x double> %a, %b
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @fdiv_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fdiv_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 8 x half> %a, %b
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fdiv_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fdiv_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 4 x half> %a, %b
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fdiv_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fdiv_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 2 x half> %a, %b
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fdiv_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fdiv_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 4 x float> %a, %b
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fdiv_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fdiv_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 2 x float> %a, %b
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fdiv_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fdiv_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = fdiv <vscale x 2 x double> %a, %b
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @fsub_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fsub <vscale x 8 x half> %a, %b
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fsub_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fsub_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fsub <vscale x 4 x half> %a, %b
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fsub_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fsub_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fsub <vscale x 2 x half> %a, %b
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fsub_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fsub <vscale x 4 x float> %a, %b
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fsub_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fsub_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fsub <vscale x 2 x float> %a, %b
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fsub_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = fsub <vscale x 2 x double> %a, %b
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @fmul_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fmul <vscale x 8 x half> %a, %b
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fmul_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fmul_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fmul <vscale x 4 x half> %a, %b
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fmul_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fmul_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = fmul <vscale x 2 x half> %a, %b
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fmul_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fmul <vscale x 4 x float> %a, %b
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fmul_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fmul_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = fmul <vscale x 2 x float> %a, %b
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fmul_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = fmul <vscale x 2 x double> %a, %b
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @fma_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fma_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %r = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %r
}

define <vscale x 4 x half> @fma_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c) {
; CHECK-LABEL: fma_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c)
  ret <vscale x 4 x half> %r
}

define <vscale x 2 x half> @fma_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c) {
; CHECK-LABEL: fma_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c)
  ret <vscale x 2 x half> %r
}

define <vscale x 4 x float> @fma_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fma_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %r = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %r
}

define <vscale x 2 x float> @fma_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c) {
; CHECK-LABEL: fma_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c)
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x double> @fma_nxv2f64_1(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_nxv2f64_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @fma_nxv2f64_2(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_nxv2f64_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %b, <vscale x 2 x double> %a, <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %r
}

define <vscale x 2 x double> @fma_nxv2f64_3(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fma_nxv2f64_3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmla z0.d, p0/m, z2.d, z1.d
; CHECK-NEXT:    ret
  %r = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x double> %b, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %r
}

define <vscale x 8 x half> @fmls_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmls_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 8 x half> %b
  %r = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> %c, <vscale x 8 x half> %neg, <vscale x 8 x half> %a)
  ret <vscale x 8 x half> %r
}

define <vscale x 4 x half> @fmls_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x half> %c) {
; CHECK-LABEL: fmls_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x half> %b
  %r = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> %c, <vscale x 4 x half> %neg, <vscale x 4 x half> %a)
  ret <vscale x 4 x half> %r
}

define <vscale x 2 x half> @fmls_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x half> %c) {
; CHECK-LABEL: fmls_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x half> %b
  %r = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> %c, <vscale x 2 x half> %neg, <vscale x 2 x half> %a)
  ret <vscale x 2 x half> %r
}

define <vscale x 4 x float> @fmls_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmls_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x float> %b
  %r = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> %c, <vscale x 4 x float> %neg, <vscale x 4 x float> %a)
  ret <vscale x 4 x float> %r
}

define <vscale x 2 x float> @fmls_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x float> %c) {
; CHECK-LABEL: fmls_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x float> %b
  %r = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> %c, <vscale x 2 x float> %neg, <vscale x 2 x float> %a)
  ret <vscale x 2 x float> %r
}

define <vscale x 2 x double> @fmls_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmls_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x double> %b
  %r = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> %c, <vscale x 2 x double> %neg, <vscale x 2 x double> %a)
  ret <vscale x 2 x double> %r
}

define <vscale x 8 x half> @fneg_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: fneg_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fneg z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = fneg <vscale x 8 x half> %a
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fneg_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: fneg_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fneg z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = fneg <vscale x 4 x half> %a
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fneg_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: fneg_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fneg z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = fneg <vscale x 2 x half> %a
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fneg_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: fneg_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fneg z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = fneg <vscale x 4 x float> %a
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fneg_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: fneg_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fneg z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = fneg <vscale x 2 x float> %a
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fneg_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: fneg_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fneg z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %res = fneg <vscale x 2 x double> %a
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @frecps_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: frecps_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecps z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.frecps.x.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @frecps_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: frecps_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecps z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.frecps.x.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @frecps_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: frecps_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecps z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.frecps.x.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

define <vscale x 8 x half> @frsqrts_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: frsqrts_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrts z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.frsqrts.x.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @frsqrts_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: frsqrts_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrts z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.frsqrts.x.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x double> @frsqrts_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: frsqrts_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrts z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.frsqrts.x.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

%complex = type { { double, double } }

define void @scalar_to_vector(ptr %outval, <vscale x 2 x i1> %pred, <vscale x 2 x double> %in1, <vscale x 2 x double> %in2) {
; CHECK-LABEL: scalar_to_vector:
; CHECK:       // %bb.0:
; CHECK-NEXT:    faddv d0, p0, z0.d
; CHECK-NEXT:    faddv d1, p0, z1.d
; CHECK-NEXT:    mov v0.d[1], v1.d[0]
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %imagp = getelementptr inbounds %complex, ptr %outval, i64 0, i32 0, i32 1
  %1 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in1)
  %2 = call double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1> %pred, <vscale x 2 x double> %in2)
  store double %1, ptr %outval, align 8
  store double %2, ptr %imagp, align 8
  ret void
}

define void @float_copy(ptr %P1, ptr %P2) {
; CHECK-LABEL: float_copy:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    st1w { z0.s }, p0, [x1]
; CHECK-NEXT:    ret
  %A = load <vscale x 4 x float>, ptr %P1, align 16
  store <vscale x 4 x float> %A, ptr %P2, align 16
  ret void
}

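; FSQRT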
define <vscale x 8 x half> @fsqrt_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: fsqrt_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fsqrt z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fsqrt_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: fsqrt_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fsqrt z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half> %a)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fsqrt_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: fsqrt_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half> %a)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fsqrt_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: fsqrt_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fsqrt z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fsqrt_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: fsqrt_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float> %a)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fsqrt_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: fsqrt_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %res
}

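; FABS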
define <vscale x 8 x half> @fabs_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: fabs_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fabs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fabs_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: fabs_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fabs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %a)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fabs_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: fabs_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %a)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fabs_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: fabs_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fabs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fabs_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: fabs_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %a)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fabs_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: fabs_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %res
}

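; FABD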
define <vscale x 8 x half> @fabd_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fabd_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 8 x half> %a, %b
  %res = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %sub)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @fabd_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: fabd_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 4 x half> %a, %b
  %res = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %sub)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @fabd_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: fabd_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 2 x half> %a, %b
  %res = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %sub)
  ret <vscale x 2 x half> %res
}

define <vscale x 4 x float> @fabd_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fabd_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 4 x float> %a, %b
  %res = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %sub)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @fabd_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: fabd_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 2 x float> %a, %b
  %res = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %sub)
  ret <vscale x 2 x float> %res
}

define <vscale x 2 x double> @fabd_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fabd_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %sub = fsub <vscale x 2 x double> %a, %b
  %res = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %sub)
  ret <vscale x 2 x double> %res
}

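; FMAXNM, FMINNM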
define <vscale x 16 x half> @maxnum_nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; CHECK-LABEL: maxnum_nxv16f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z2.h
; CHECK-NEXT:    fmaxnm z1.h, p0/m, z1.h, z3.h
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.maxnum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x half> @maxnum_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: maxnum_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @maxnum_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: maxnum_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @maxnum_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: maxnum_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %res
}

define <vscale x 8 x float> @maxnum_nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: maxnum_nxv8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z2.s
; CHECK-NEXT:    fmaxnm z1.s, p0/m, z1.s, z3.s
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x float> @maxnum_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: maxnum_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @maxnum_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: maxnum_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x double> @maxnum_nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: maxnum_nxv4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT:    fmaxnm z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b)
  ret <vscale x 4 x double> %res
}

define <vscale x 2 x double> @maxnum_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: maxnum_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

define <vscale x 16 x half> @minnum_nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; CHECK-LABEL: minnum_nxv16f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z2.h
; CHECK-NEXT:    fminnm z1.h, p0/m, z1.h, z3.h
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x half> @minnum_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: minnum_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @minnum_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: minnum_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @minnum_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: minnum_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %res
}

define <vscale x 8 x float> @minnum_nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: minnum_nxv8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z2.s
; CHECK-NEXT:    fminnm z1.s, p0/m, z1.s, z3.s
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x float> @minnum_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: minnum_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @minnum_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: minnum_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x double> @minnum_nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: minnum_nxv4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT:    fminnm z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b)
  ret <vscale x 4 x double> %res
}

define <vscale x 2 x double> @minnum_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: minnum_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

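; FMAX, FMIN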
define <vscale x 16 x half> @maximum_nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; CHECK-LABEL: maximum_nxv16f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z2.h
; CHECK-NEXT:    fmax z1.h, p0/m, z1.h, z3.h
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x half> @maximum_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: maximum_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @maximum_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: maximum_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @maximum_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: maximum_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %res
}

define <vscale x 8 x float> @maximum_nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: maximum_nxv8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z2.s
; CHECK-NEXT:    fmax z1.s, p0/m, z1.s, z3.s
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.maximum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x float> @maximum_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: maximum_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @maximum_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: maximum_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x double> @maximum_nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: maximum_nxv4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT:    fmax z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.maximum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b)
  ret <vscale x 4 x double> %res
}

define <vscale x 2 x double> @maximum_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: maximum_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

define <vscale x 16 x half> @minimum_nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b) {
; CHECK-LABEL: minimum_nxv16f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z2.h
; CHECK-NEXT:    fmin z1.h, p0/m, z1.h, z3.h
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x half> @minimum_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: minimum_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x half> @minimum_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: minimum_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 2 x half> @minimum_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: minimum_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 2 x half> %res
}

define <vscale x 8 x float> @minimum_nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b) {
; CHECK-LABEL: minimum_nxv8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z2.s
; CHECK-NEXT:    fmin z1.s, p0/m, z1.s, z3.s
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x float> @minimum_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: minimum_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 2 x float> @minimum_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: minimum_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 2 x float> %res
}

define <vscale x 4 x double> @minimum_nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b) {
; CHECK-LABEL: minimum_nxv4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z2.d
; CHECK-NEXT:    fmin z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.minimum.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b)
  ret <vscale x 4 x double> %res
}

define <vscale x 2 x double> @minimum_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: minimum_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %res
}

declare <vscale x 8 x half> @llvm.aarch64.sve.frecps.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frecps.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frecps.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frsqrts.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frsqrts.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frsqrts.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>)

declare <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.sqrt.nxv4f16(<vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.sqrt.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.sqrt.nxv2f32(<vscale x 2 x float>)
declare <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float>)
declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)

declare <vscale x 16 x half> @llvm.maxnum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
declare <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.maxnum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.maxnum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x float> @llvm.maxnum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
declare <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.maxnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 4 x double> @llvm.maxnum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
declare <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 16 x half> @llvm.minnum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
declare <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.minnum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.minnum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x float> @llvm.minnum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
declare <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.minnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 4 x double> @llvm.minnum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
declare <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 16 x half> @llvm.maximum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
declare <vscale x 8 x half> @llvm.maximum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.maximum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.maximum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x float> @llvm.maximum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
declare <vscale x 4 x float> @llvm.maximum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.maximum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 4 x double> @llvm.maximum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
declare <vscale x 2 x double> @llvm.maximum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 16 x half> @llvm.minimum.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>)
declare <vscale x 8 x half> @llvm.minimum.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x half> @llvm.minimum.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 2 x half> @llvm.minimum.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x float> @llvm.minimum.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>)
declare <vscale x 4 x float> @llvm.minimum.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x float> @llvm.minimum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 4 x double> @llvm.minimum.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>)
declare <vscale x 2 x double> @llvm.minimum.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)

; Function Attrs: nounwind readnone
declare double @llvm.aarch64.sve.faddv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>) #2