; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; FABD
;

define <vscale x 8 x half> @fabd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fabd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fabd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fabd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fabd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fabd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FABS
;

define <vscale x 8 x half> @fabs_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fabs_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> %a,
                                                                 <vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fabs_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fabs_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> %a,
                                                                  <vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fabs_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fabs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FADD
;

define <vscale x 8 x half> @fadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FCADD
;
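; Note on FCADD: lanes are treated as (real, imaginary) pairs and the second
; source is rotated in the complex plane before the add; the ISA encodes only
; the #90 and #270 rotations, selected by the trailing i32 immediate below.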

define <vscale x 8 x half> @fcadd_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcadd_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcadd z0.h, p0/m, z0.h, z1.h, #90
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  i32 90)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fcadd_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcadd_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcadd z0.s, p0/m, z0.s, z1.s, #270
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   i32 270)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fcadd_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcadd_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcadd z0.d, p0/m, z0.d, z1.d, #90
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    i32 90)
  ret <vscale x 2 x double> %out
}

;
; FCMLA
;
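; Note on FCMLA: a predicated complex multiply-accumulate; the trailing i32
; immediate selects the rotation (#0, #90, #180 or #270) applied to the
; product before it is accumulated into the first source.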

define <vscale x 8 x half> @fcmla_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fcmla_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmla z0.h, p0/m, z1.h, z2.h, #90
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcmla.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  <vscale x 8 x half> %c,
                                                                  i32 90)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fcmla_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fcmla_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmla z0.s, p0/m, z1.s, z2.s, #180
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcmla.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   <vscale x 4 x float> %c,
                                                                   i32 180)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fcmla_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fcmla_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmla z0.d, p0/m, z1.d, z2.d, #270
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcmla.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    <vscale x 2 x double> %c,
                                                                    i32 270)
  ret <vscale x 2 x double> %out
}

;
; FCMLA (Indexed)
;
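; Note on the indexed form: z2.T[idx] names a complex element (a lane pair)
; within each 128-bit segment, so the index range is 0-3 for .h and 0-1 for
; .s; the ISA has no indexed .d encoding, hence no fcmla_lane_d test.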

define <vscale x 8 x half> @fcmla_lane_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fcmla_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmla z0.h, z1.h, z2.h[3], #0
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcmla.lane.nxv8f16(<vscale x 8 x half> %a,
                                                                       <vscale x 8 x half> %b,
                                                                       <vscale x 8 x half> %c,
                                                                       i32 3,
                                                                       i32 0)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fcmla_lane_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fcmla_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmla z0.s, z1.s, z2.s[1], #90
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcmla.lane.nxv4f32(<vscale x 4 x float> %a,
                                                                        <vscale x 4 x float> %b,
                                                                        <vscale x 4 x float> %c,
                                                                        i32 1,
                                                                        i32 90)
  ret <vscale x 4 x float> %out
}

;
; FDIV
;

define <vscale x 8 x half> @fdiv_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fdiv_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fdiv_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fdiv_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fdiv_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fdiv_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FDIVR
;
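; Note on FDIVR: the reversed divide computes %b / %a where fdiv computes
; %a / %b; both exist so that either operand may sit in the destructive
; destination register.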

define <vscale x 8 x half> @fdivr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fdivr_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fdivr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fdivr_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fdivr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fdivr_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FEXPA
;
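; Note on FEXPA: the exponential accelerator is unpredicated and consumes an
; integer vector; roughly speaking, the low bits index a mantissa table and
; the bits above form the exponent of a 2^x seed value, which is why these
; intrinsics take iNN element types.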

define <vscale x 8 x half> @fexpa_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: fexpa_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fexpa z0.h, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fexpa.x.nxv8f16(<vscale x 8 x i16> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fexpa_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: fexpa_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fexpa z0.s, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fexpa.x.nxv4f32(<vscale x 4 x i32> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fexpa_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: fexpa_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fexpa z0.d, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fexpa.x.nxv2f64(<vscale x 2 x i64> %a)
  ret <vscale x 2 x double> %out
}

;
; FMAD
;
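; Note on FMAD vs. FMLA: fmad computes %a * %b + %c (destructive in the
; multiplicand) while fmla computes %a + %b * %c (destructive in the
; addend); the CHECK lines look alike because each intrinsic maps to the
; encoding whose destructive operand is already in z0.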

define <vscale x 8 x half> @fmad_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmad_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b,
                                                                 <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmad_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmad_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b,
                                                                  <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmad_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmad_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b,
                                                                   <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

define <vscale x 8 x half> @fmax_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAXNM
;
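; Note on FMAXNM/FMINNM: these follow IEEE 754-2008 maxNum/minNum semantics,
; returning the numeric operand when exactly one input is a quiet NaN,
; whereas fmax/fmin above propagate the NaN.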

define <vscale x 8 x half> @fmaxnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

define <vscale x 8 x half> @fmin_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

define <vscale x 8 x half> @fminnm_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMLA
;

define <vscale x 8 x half> @fmla_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmla_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmla.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b,
                                                                 <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmla_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmla_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmla.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b,
                                                                  <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmla_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmla_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmla.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b,
                                                                   <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMLA (Indexed)
;

define <vscale x 8 x half> @fmla_lane_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmla_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.h, z1.h, z2.h[3]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmla.lane.nxv8f16(<vscale x 8 x half> %a,
                                                                      <vscale x 8 x half> %b,
                                                                      <vscale x 8 x half> %c,
                                                                      i32 3)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmla_lane_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmla_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.s, z1.s, z2.s[2]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmla.lane.nxv4f32(<vscale x 4 x float> %a,
                                                                       <vscale x 4 x float> %b,
                                                                       <vscale x 4 x float> %c,
                                                                       i32 2)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmla_lane_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmla_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla z0.d, z1.d, z2.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmla.lane.nxv2f64(<vscale x 2 x double> %a,
                                                                        <vscale x 2 x double> %b,
                                                                        <vscale x 2 x double> %c,
                                                                        i32 1)
  ret <vscale x 2 x double> %out
}

;
; FMLS
;

define <vscale x 8 x half> @fmls_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmls_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmls.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b,
                                                                 <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmls_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmls_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmls.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b,
                                                                  <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmls_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmls_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmls.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b,
                                                                   <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMLS (Indexed)
;

define <vscale x 8 x half> @fmls_lane_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmls_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.h, z1.h, z2.h[3]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmls.lane.nxv8f16(<vscale x 8 x half> %a,
                                                                      <vscale x 8 x half> %b,
                                                                      <vscale x 8 x half> %c,
                                                                      i32 3)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmls_lane_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmls_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.s, z1.s, z2.s[2]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmls.lane.nxv4f32(<vscale x 4 x float> %a,
                                                                       <vscale x 4 x float> %b,
                                                                       <vscale x 4 x float> %c,
                                                                       i32 2)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmls_lane_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmls_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmls z0.d, z1.d, z2.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmls.lane.nxv2f64(<vscale x 2 x double> %a,
                                                                        <vscale x 2 x double> %b,
                                                                        <vscale x 2 x double> %c,
                                                                        i32 1)
  ret <vscale x 2 x double> %out
}

;
; FMSB
;

define <vscale x 8 x half> @fmsb_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fmsb_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmsb.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b,
                                                                 <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmsb_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fmsb_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmsb.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b,
                                                                  <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fmsb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmsb.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b,
                                                                   <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

define <vscale x 8 x half> @fmul_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL (Indexed)
;

define <vscale x 8 x half> @fmul_lane_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_lane_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.h, z0.h, z1.h[3]
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.lane.nxv8f16(<vscale x 8 x half> %a,
                                                                      <vscale x 8 x half> %b,
                                                                      i32 3)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_lane_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_lane_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.s, z0.s, z1.s[2]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.lane.nxv4f32(<vscale x 4 x float> %a,
                                                                       <vscale x 4 x float> %b,
                                                                       i32 2)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_lane_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_lane_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul z0.d, z0.d, z1.d[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.lane.nxv2f64(<vscale x 2 x double> %a,
                                                                        <vscale x 2 x double> %b,
                                                                        i32 1)
  ret <vscale x 2 x double> %out
}

;
; FMULX
;
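; Note on FMULX: identical to fmul except that 0.0 multiplied by infinity
; (in either order) returns 2.0 of the appropriate sign rather than the
; default NaN.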

define <vscale x 8 x half> @fmulx_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmulx_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmulx_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmulx_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmulx_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmulx_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmulx z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FNEG
;

define <vscale x 8 x half> @fneg_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fneg_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fneg z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fneg.nxv8f16(<vscale x 8 x half> %a,
                                                                 <vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fneg_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fneg_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fneg z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fneg.nxv4f32(<vscale x 4 x float> %a,
                                                                  <vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fneg_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fneg_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fneg z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fneg.nxv2f64(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FNMAD
;

define <vscale x 8 x half> @fnmad_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmad_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmad.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmad_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmad_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmad.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmad_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmad_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmad.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FNMLA
;

define <vscale x 8 x half> @fnmla_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmla_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmla.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmla_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmla_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmla.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmla_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmla_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmla.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FNMLS
;

define <vscale x 8 x half> @fnmls_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmls_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmls.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmls_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmls_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmls.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmls_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmls_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmls z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmls.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FNMSB
;

define <vscale x 8 x half> @fnmsb_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
; CHECK-LABEL: fnmsb_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.h, p0/m, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fnmsb.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b,
                                                                  <vscale x 8 x half> %c)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fnmsb_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
; CHECK-LABEL: fnmsb_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fnmsb.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b,
                                                                   <vscale x 4 x float> %c)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fnmsb_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: fnmsb_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fnmsb z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fnmsb.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b,
                                                                    <vscale x 2 x double> %c)
  ret <vscale x 2 x double> %out
}

;
; FRECPE
;
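; Note on FRECPE (and FRSQRTE below): unpredicated, low-precision estimate
; instructions intended as seeds for Newton-Raphson refinement; the %pg
; argument of frecpe_d is not passed to the intrinsic.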

define <vscale x 8 x half> @frecpe_h(<vscale x 8 x half> %a) {
; CHECK-LABEL: frecpe_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpe z0.h, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frecpe.x.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frecpe_s(<vscale x 4 x float> %a) {
; CHECK-LABEL: frecpe_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpe z0.s, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frecpe.x.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frecpe_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
; CHECK-LABEL: frecpe_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpe z0.d, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frecpe.x.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %out
}

;
; FRECPX
;
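; Note on FRECPX (per the AArch64 spec, roughly): it keeps the sign, inverts
; the exponent bits and zeroes the fraction, producing a scale factor used
; to keep a subsequent reciprocal estimate in range.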

define <vscale x 8 x half> @frecpx_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frecpx_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpx z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frecpx.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frecpx_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frecpx_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpx z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frecpx.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frecpx_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frecpx_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frecpx z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frecpx.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTA
;
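; Note on the frint* family: the suffix selects only the rounding mode:
; frinta is to-nearest with ties away from zero, frintn to-nearest-even,
; frintm toward minus infinity, frintp toward plus infinity, frintz toward
; zero, and frinti/frintx use the current FPCR mode (frintx also raises
; Inexact).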

define <vscale x 8 x half> @frinta_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frinta_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frinta.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frinta_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frinta_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frinta.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frinta_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frinta_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frinta.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTI
;

define <vscale x 8 x half> @frinti_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frinti_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frinti.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frinti_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frinti_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frinti.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frinti_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frinti_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frinti.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTM
;

define <vscale x 8 x half> @frintm_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frintm_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frintm.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frintm_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frintm_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frintm.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frintm_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frintm_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frintm.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTN
;

define <vscale x 8 x half> @frintn_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frintn_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frintn.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frintn_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frintn_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frintn.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frintn_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frintn_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frintn.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTP
;

define <vscale x 8 x half> @frintp_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frintp_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frintp.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frintp_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frintp_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frintp.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frintp_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frintp_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frintp.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTX
;

define <vscale x 8 x half> @frintx_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frintx_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frintx.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frintx_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frintx_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frintx.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frintx_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frintx_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frintx.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRINTZ
;

define <vscale x 8 x half> @frintz_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: frintz_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frintz.nxv8f16(<vscale x 8 x half> %a,
                                                                   <vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frintz_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: frintz_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frintz.nxv4f32(<vscale x 4 x float> %a,
                                                                    <vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frintz_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: frintz_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frintz.nxv2f64(<vscale x 2 x double> %a,
                                                                     <vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FRSQRTE
;

define <vscale x 8 x half> @frsqrte_h(<vscale x 8 x half> %a) {
; CHECK-LABEL: frsqrte_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrte z0.h, z0.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.frsqrte.x.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @frsqrte_s(<vscale x 4 x float> %a) {
; CHECK-LABEL: frsqrte_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrte z0.s, z0.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.frsqrte.x.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @frsqrte_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
; CHECK-LABEL: frsqrte_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frsqrte z0.d, z0.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.frsqrte.x.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %out
}

;
; FSCALE
;
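; Note on FSCALE: each element of the first source is multiplied by 2^n,
; with n taken from the matching lane of the integer second source; hence
; the mixed float/int operand types below.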

define <vscale x 8 x half> @fscale_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: fscale_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fscale z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fscale.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fscale_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: fscale_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fscale z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fscale.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a,
                                                                    <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fscale_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: fscale_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fscale z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fscale.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a,
                                                                     <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}

;
; FSQRT
;

define <vscale x 8 x half> @fsqrt_h(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsqrt_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> %a,
                                                                  <vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsqrt_s(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsqrt_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> %a,
                                                                   <vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsqrt_d(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsqrt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> %a,
                                                                    <vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

define <vscale x 8 x half> @fsub_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUBR
;

define <vscale x 8 x half> @fsubr_h(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FTMAD
;
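; Note on FTMAD/FTSMUL/FTSSEL: helpers for vectorized sin/cos. Roughly,
; ftmad's immediate selects a built-in polynomial coefficient for the
; multiply-add, ftsmul forms the squared starting value and ftssel applies
; the quadrant-based final adjustment, which is why the latter two take an
; integer second operand.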

define <vscale x 8 x half> @ftmad_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: ftmad_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftmad z0.h, z0.h, z1.h, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half> %a,
                                                                    <vscale x 8 x half> %b,
                                                                    i32 0)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @ftmad_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: ftmad_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftmad z0.s, z0.s, z1.s, #0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float> %a,
                                                                     <vscale x 4 x float> %b,
                                                                     i32 0)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @ftmad_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: ftmad_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftmad z0.d, z0.d, z1.d, #7
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double> %a,
                                                                      <vscale x 2 x double> %b,
                                                                      i32 7)
  ret <vscale x 2 x double> %out
}

;
; FTSMUL
;

define <vscale x 8 x half> @ftsmul_h(<vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ftsmul_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftsmul z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ftsmul.x.nxv8f16(<vscale x 8 x half> %a,
                                                                     <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @ftsmul_s(<vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ftsmul_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftsmul z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ftsmul.x.nxv4f32(<vscale x 4 x float> %a,
                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @ftsmul_d(<vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ftsmul_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftsmul z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ftsmul.x.nxv2f64(<vscale x 2 x double> %a,
                                                                       <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}

;
; FTSSEL
;

define <vscale x 8 x half> @ftssel_h(<vscale x 8 x half> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ftssel_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftssel z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ftssel.x.nxv8f16(<vscale x 8 x half> %a,
                                                                     <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @ftssel_s(<vscale x 4 x float> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ftssel_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftssel z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ftssel.x.nxv4f32(<vscale x 4 x float> %a,
                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @ftssel_d(<vscale x 2 x double> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ftssel_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ftssel z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ftssel.x.nxv2f64(<vscale x 2 x double> %a,
                                                                       <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}
1640 declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1641 declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1642 declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1644 declare <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
1645 declare <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
1646 declare <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)
1648 declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1649 declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1650 declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1652 declare <vscale x 8 x half> @llvm.aarch64.sve.fcadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
1653 declare <vscale x 4 x float> @llvm.aarch64.sve.fcadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
1654 declare <vscale x 2 x double> @llvm.aarch64.sve.fcadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
1656 declare <vscale x 8 x half> @llvm.aarch64.sve.fcmla.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
1657 declare <vscale x 4 x float> @llvm.aarch64.sve.fcmla.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
1658 declare <vscale x 2 x double> @llvm.aarch64.sve.fcmla.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
1660 declare <vscale x 8 x half> @llvm.aarch64.sve.fcmla.lane.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32, i32)
1661 declare <vscale x 4 x float> @llvm.aarch64.sve.fcmla.lane.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32, i32)
1663 declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1664 declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1665 declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1667 declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1668 declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1669 declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1671 declare <vscale x 8 x half> @llvm.aarch64.sve.fexpa.x.nxv8f16(<vscale x 8 x i16>)
1672 declare <vscale x 4 x float> @llvm.aarch64.sve.fexpa.x.nxv4f32(<vscale x 4 x i32>)
1673 declare <vscale x 2 x double> @llvm.aarch64.sve.fexpa.x.nxv2f64(<vscale x 2 x i64>)
1675 declare <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
1676 declare <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1677 declare <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
1679 declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1680 declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1681 declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1683 declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1684 declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1685 declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1687 declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1688 declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1689 declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1691 declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
1692 declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
1693 declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
1695 declare <vscale x 8 x half> @llvm.aarch64.sve.fmla.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
1696 declare <vscale x 4 x float> @llvm.aarch64.sve.fmla.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1697 declare <vscale x 2 x double> @llvm.aarch64.sve.fmla.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
1699 declare <vscale x 8 x half> @llvm.aarch64.sve.fmla.lane.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
1700 declare <vscale x 4 x float> @llvm.aarch64.sve.fmla.lane.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
1701 declare <vscale x 2 x double> @llvm.aarch64.sve.fmla.lane.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
1703 declare <vscale x 8 x half> @llvm.aarch64.sve.fmls.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
1704 declare <vscale x 4 x float> @llvm.aarch64.sve.fmls.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1705 declare <vscale x 2 x double> @llvm.aarch64.sve.fmls.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
1707 declare <vscale x 8 x half> @llvm.aarch64.sve.fmls.lane.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
1708 declare <vscale x 4 x float> @llvm.aarch64.sve.fmls.lane.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
1709 declare <vscale x 2 x double> @llvm.aarch64.sve.fmls.lane.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
1711 declare <vscale x 8 x half> @llvm.aarch64.sve.fmsb.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
1712 declare <vscale x 4 x float> @llvm.aarch64.sve.fmsb.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
1713 declare <vscale x 2 x double> @llvm.aarch64.sve.fmsb.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.lane.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.lane.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.lane.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fneg.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fneg.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fneg.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)
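
; Note: the unary merging intrinsics (fabs, fneg, frecpx, fsqrt, the frint*
; family) take the passthru vector first, then the governing predicate, then
; the source operand; the predicated binary intrinsics take the predicate
; first instead.
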
declare <vscale x 8 x half> @llvm.aarch64.sve.fnmad.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmad.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmad.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fnmla.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmla.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmla.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fnmls.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmls.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmls.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fnmsb.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fnmsb.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fnmsb.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frecpe.x.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frecpe.x.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frecpe.x.nxv2f64(<vscale x 2 x double>)
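
; Note: the .x suffix marks intrinsics for unpredicated instructions. frecpe
; and frsqrte produce low-precision reciprocal (square-root) estimates that
; are normally refined with Newton-Raphson iterations before use.
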
declare <vscale x 8 x half> @llvm.aarch64.sve.frecpx.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frecpx.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frecpx.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frinta.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frinta.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frinta.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frinti.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frinti.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frinti.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frintm.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frintm.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frintm.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frintn.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frintn.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frintn.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frintp.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frintp.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frintp.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frintx.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frintx.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frintx.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frintz.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frintz.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frintz.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.frsqrte.x.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.frsqrte.x.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.frsqrte.x.nxv2f64(<vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fscale.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fscale.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fscale.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x i64>)
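
; Note: fscale takes an integer exponent vector of matching element count and
; width, and computes a * 2^n per active lane, i.e. an element-wise ldexp.
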
declare <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
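
; Note: fsubr is the reversed form: with operands (pg, a, b) it computes
; b - a per active lane, rather than a - b as fsub does.
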
declare <vscale x 8 x half> @llvm.aarch64.sve.ftmad.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, i32)
declare <vscale x 4 x float> @llvm.aarch64.sve.ftmad.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, i32)
declare <vscale x 2 x double> @llvm.aarch64.sve.ftmad.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, i32)

declare <vscale x 8 x half> @llvm.aarch64.sve.ftsmul.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ftsmul.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ftsmul.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i64>)

declare <vscale x 8 x half> @llvm.aarch64.sve.ftssel.x.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ftssel.x.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ftssel.x.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i64>)
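
; Note: ftmad, ftsmul and ftssel are the trigonometric helper instructions
; used when expanding sin/cos; the integer operands of ftsmul/ftssel carry
; per-element quadrant state rather than ordinary data, and the i32 on ftmad
; is an immediate coefficient index.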