; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
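
; The tests below feed the first operand of each merging intrinsic through a
; select against zeroinitializer, so its inactive lanes are known to be zero.
; With +use-experimental-zeroing-pseudos this is expected to lower to a
; zeroing MOVPRFX (p0/z) of the destination followed by the merging form of
; the floating-point instruction, rather than an explicit mov/sel sequence
; that materialises the zeroed operand.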

;
; FADD
;

define <vscale x 8 x half> @fadd_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fadd_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fadd_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fadd_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fadd_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fadd_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMAX
;

define <vscale x 8 x half> @fmax_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmax_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmax_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmax_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmax_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmax_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
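
; Note: fmaxnm/fminnm below are the IEEE 754 maxNum/minNum variants (a quiet
; NaN in one operand yields the other operand); the zeroing MOVPRFX folding
; is expected to be identical to the fmax/fmin cases.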

;
; FMAXNM
;

define <vscale x 8 x half> @fmaxnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmaxnm_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmaxnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmaxnm_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmaxnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmaxnm_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMIN
;

define <vscale x 8 x half> @fmin_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmin_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmin_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmin_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmin_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmin_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMINNM
;

define <vscale x 8 x half> @fminnm_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fminnm_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1> %pg,
                                                                   <vscale x 8 x half> %a_z,
                                                                   <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fminnm_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fminnm_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x float> %a_z,
                                                                    <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fminnm_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fminnm_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1> %pg,
                                                                     <vscale x 2 x double> %a_z,
                                                                     <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FMUL
;

define <vscale x 8 x half> @fmul_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fmul_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fmul_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fmul_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fmul_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fmul_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

;
; FSUB
;

define <vscale x 8 x half> @fsub_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsub_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsub z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1> %pg,
                                                                 <vscale x 8 x half> %a_z,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsub_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsub_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsub z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1> %pg,
                                                                  <vscale x 4 x float> %a_z,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsub_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsub_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsub z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %pg,
                                                                   <vscale x 2 x double> %a_z,
                                                                   <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}
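
; Note: fsubr is the reversed form of fsub and computes %b - %a_z, so the
; zeroed select operand becomes the subtrahend while the destination is
; still the register prefixed by the zeroing MOVPRFX.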

;
; FSUBR
;

define <vscale x 8 x half> @fsubr_h_zero(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: fsubr_h_zero:
; CHECK: movprfx z0.h, p0/z, z0.h
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x half> %a, <vscale x 8 x half> zeroinitializer
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1> %pg,
                                                                  <vscale x 8 x half> %a_z,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %out
}

define <vscale x 4 x float> @fsubr_s_zero(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: fsubr_s_zero:
; CHECK: movprfx z0.s, p0/z, z0.s
; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x float> %a, <vscale x 4 x float> zeroinitializer
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x float> %a_z,
                                                                   <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %out
}

define <vscale x 2 x double> @fsubr_d_zero(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fsubr_d_zero:
; CHECK: movprfx z0.d, p0/z, z0.d
; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> zeroinitializer
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1> %pg,
                                                                    <vscale x 2 x double> %a_z,
                                                                    <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %out
}

declare <vscale x 8 x half> @llvm.aarch64.sve.fabd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fadd.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdiv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdiv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdiv.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fdivr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fdivr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fdivr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmax.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmax.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmax.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmaxnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmaxnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmaxnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmin.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmin.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmin.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fminnm.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fminnm.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fminnm.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmul.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fmulx.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fmulx.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmulx.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsub.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsub.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsubr.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsubr.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsubr.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)