; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s

;
; FCVT
;
define <vscale x 8 x half> @fcvt_f16_f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_f16_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x float> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 8 x half> @fcvt_f16_f64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_f16_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x double> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @fcvt_f32_f16(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvt_f32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 4 x float> @fcvt_f32_f64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvt_f32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @fcvt_f64_f16(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvt_f64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 8 x half> %b)
  ret <vscale x 2 x double> %out
}
define <vscale x 2 x double> @fcvt_f64_f32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvt_f64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvt z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 4 x float> %b)
  ret <vscale x 2 x double> %out
}

;
; FCVTZS
;
define <vscale x 8 x i16> @fcvtzs_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i16_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x half> %b)
  ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @fcvtzs_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzs_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x float> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 2 x i64> @fcvtzs_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzs_i64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x double> %b)
  ret <vscale x 2 x i64> %out
}
define <vscale x 4 x i32> @fcvtzs_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 4 x i32> @fcvtzs_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzs_i32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 2 x i64> @fcvtzs_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzs_i64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 2 x i64> %out
}
define <vscale x 2 x i64> @fcvtzs_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzs_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 4 x float> %b)
  ret <vscale x 2 x i64> %out
}

;
; FCVTZU
;
define <vscale x 8 x i16> @fcvtzu_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i16_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x half> %b)
  ret <vscale x 8 x i16> %out
}
define <vscale x 4 x i32> @fcvtzu_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzu_i32_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
                                                                          <vscale x 4 x i1> %pg,
                                                                          <vscale x 4 x float> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 2 x i64> @fcvtzu_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzu_i64_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
                                                                          <vscale x 2 x i1> %pg,
                                                                          <vscale x 2 x double> %b)
  ret <vscale x 2 x i64> %out
}
define <vscale x 4 x i32> @fcvtzu_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i32_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 4 x i32> @fcvtzu_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcvtzu_i32_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x double> %b)
  ret <vscale x 4 x i32> %out
}
define <vscale x 2 x i64> @fcvtzu_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
; CHECK-LABEL: fcvtzu_i64_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 8 x half> %b)
  ret <vscale x 2 x i64> %out
}
define <vscale x 2 x i64> @fcvtzu_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
; CHECK-LABEL: fcvtzu_i64_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 4 x float> %b)
  ret <vscale x 2 x i64> %out
}

;
; SCVTF
;
define <vscale x 8 x half> @scvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: scvtf_f16_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @scvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
                                                                           <vscale x 4 x i1> %pg,
                                                                           <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @scvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f64_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
                                                                            <vscale x 2 x i1> %pg,
                                                                            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}
define <vscale x 8 x half> @scvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f16_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 8 x half> @scvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f16_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @scvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: scvtf_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @scvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: scvtf_f64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 2 x double> %out
}

;
; UCVTF
;
define <vscale x 8 x half> @ucvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: ucvtf_f16_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
                                                                          <vscale x 8 x i1> %pg,
                                                                          <vscale x 8 x i16> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @ucvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f32_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
                                                                           <vscale x 4 x i1> %pg,
                                                                           <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @ucvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f64_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
                                                                            <vscale x 2 x i1> %pg,
                                                                            <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %out
}
define <vscale x 8 x half> @ucvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f16_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> %a,
                                                                 <vscale x 4 x i1> %pg,
                                                                 <vscale x 4 x i32> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 8 x half> @ucvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f16_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.h, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> %a,
                                                                 <vscale x 2 x i1> %pg,
                                                                 <vscale x 2 x i64> %b)
  ret <vscale x 8 x half> %out
}
define <vscale x 4 x float> @ucvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: ucvtf_f32_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.s, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> %a,
                                                                  <vscale x 2 x i1> %pg,
                                                                  <vscale x 2 x i64> %b)
  ret <vscale x 4 x float> %out
}
define <vscale x 2 x double> @ucvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: ucvtf_f64_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf z0.d, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> %a,
                                                                   <vscale x 2 x i1> %pg,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 2 x double> %out
}
declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)

declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)

declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)