; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
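; Each function below zeroes the inactive lanes of the first operand with a
; 'select %pg, %x, zeroinitializer' before calling the predicated SVE
; intrinsic. With +use-experimental-zeroing-pseudos this is expected to lower
; to a zeroing MOVPRFX (p0/z) followed by the merging (p0/m) form of the
; floating-point instruction, as the CHECK lines assert.

;
; FADD
;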
define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
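;
; FMAX
;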
define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
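;
; FMAXNM
;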
define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
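;
; FMIN
;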
define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
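;
; FMINNM
;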
define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
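;
; FMUL
;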
define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
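;
; FSUB
;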
define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
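;
; FSUBR
;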
define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a_z,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a_z,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a_z,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)