1 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
;; FCVT: merging predicated FP-to-FP element conversions.
;; Each test passes %a as the merged value (inactive lanes), %pg as the
;; governing predicate and %b as the source, and expects a single
;; merging ("/m") fcvt whose destination/source suffixes match the
;; result/operand element types.
; f32 -> f16 narrowing convert.
7 define <vscale x 8 x half> @fcvt_f16_f32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
8 ; CHECK-LABEL: fcvt_f16_f32:
9 ; CHECK: fcvt z0.h, p0/m, z1.s
11 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half> %a,
12 <vscale x 4 x i1> %pg,
13 <vscale x 4 x float> %b)
14 ret <vscale x 8 x half> %out
; f64 -> f16 narrowing convert, governed by a 2-element predicate.
17 define <vscale x 8 x half> @fcvt_f16_f64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
18 ; CHECK-LABEL: fcvt_f16_f64:
19 ; CHECK: fcvt z0.h, p0/m, z1.d
21 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half> %a,
22 <vscale x 2 x i1> %pg,
23 <vscale x 2 x double> %b)
24 ret <vscale x 8 x half> %out
; f16 -> f32 widening convert.
27 define <vscale x 4 x float> @fcvt_f32_f16(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
28 ; CHECK-LABEL: fcvt_f32_f16:
29 ; CHECK: fcvt z0.s, p0/m, z1.h
31 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float> %a,
32 <vscale x 4 x i1> %pg,
33 <vscale x 8 x half> %b)
34 ret <vscale x 4 x float> %out
; f64 -> f32 narrowing convert.
37 define <vscale x 4 x float> @fcvt_f32_f64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
38 ; CHECK-LABEL: fcvt_f32_f64:
39 ; CHECK: fcvt z0.s, p0/m, z1.d
41 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float> %a,
42 <vscale x 2 x i1> %pg,
43 <vscale x 2 x double> %b)
44 ret <vscale x 4 x float> %out
; f16 -> f64 widening convert.
47 define <vscale x 2 x double> @fcvt_f64_f16(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
48 ; CHECK-LABEL: fcvt_f64_f16:
49 ; CHECK: fcvt z0.d, p0/m, z1.h
51 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double> %a,
52 <vscale x 2 x i1> %pg,
53 <vscale x 8 x half> %b)
54 ret <vscale x 2 x double> %out
; f32 -> f64 widening convert.
57 define <vscale x 2 x double> @fcvt_f64_f32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
58 ; CHECK-LABEL: fcvt_f64_f32:
59 ; CHECK: fcvt z0.d, p0/m, z1.s
61 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double> %a,
62 <vscale x 2 x i1> %pg,
63 <vscale x 4 x float> %b)
64 ret <vscale x 2 x double> %out
;; FCVTZS: merging predicated FP -> signed integer conversions
;; (round toward zero). Same-width cases use mangled nxv* intrinsic
;; names; mixed-width cases use the i<N>f<M>-suffixed names.
; f16 -> i16, same element width (8 lanes).
71 define <vscale x 8 x i16> @fcvtzs_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
72 ; CHECK-LABEL: fcvtzs_i16_f16:
73 ; CHECK: fcvtzs z0.h, p0/m, z1.h
75 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
76 <vscale x 8 x i1> %pg,
77 <vscale x 8 x half> %b)
78 ret <vscale x 8 x i16> %out
; f32 -> i32, same element width (4 lanes).
81 define <vscale x 4 x i32> @fcvtzs_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
82 ; CHECK-LABEL: fcvtzs_i32_f32:
83 ; CHECK: fcvtzs z0.s, p0/m, z1.s
85 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
86 <vscale x 4 x i1> %pg,
87 <vscale x 4 x float> %b)
88 ret <vscale x 4 x i32> %out
; f64 -> i64, same element width (2 lanes).
91 define <vscale x 2 x i64> @fcvtzs_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
92 ; CHECK-LABEL: fcvtzs_i64_f64:
93 ; CHECK: fcvtzs z0.d, p0/m, z1.d
95 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
96 <vscale x 2 x i1> %pg,
97 <vscale x 2 x double> %b)
98 ret <vscale x 2 x i64> %out
; f16 source, i32 result (mixed width).
101 define <vscale x 4 x i32> @fcvtzs_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
102 ; CHECK-LABEL: fcvtzs_i32_f16:
103 ; CHECK: fcvtzs z0.s, p0/m, z1.h
105 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32> %a,
106 <vscale x 4 x i1> %pg,
107 <vscale x 8 x half> %b)
108 ret <vscale x 4 x i32> %out
; f64 source, i32 result (mixed width).
111 define <vscale x 4 x i32> @fcvtzs_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
112 ; CHECK-LABEL: fcvtzs_i32_f64:
113 ; CHECK: fcvtzs z0.s, p0/m, z1.d
115 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32> %a,
116 <vscale x 2 x i1> %pg,
117 <vscale x 2 x double> %b)
118 ret <vscale x 4 x i32> %out
; f16 source, i64 result (mixed width).
121 define <vscale x 2 x i64> @fcvtzs_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
122 ; CHECK-LABEL: fcvtzs_i64_f16:
123 ; CHECK: fcvtzs z0.d, p0/m, z1.h
125 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64> %a,
126 <vscale x 2 x i1> %pg,
127 <vscale x 8 x half> %b)
128 ret <vscale x 2 x i64> %out
; f32 source, i64 result (mixed width).
131 define <vscale x 2 x i64> @fcvtzs_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
132 ; CHECK-LABEL: fcvtzs_i64_f32:
133 ; CHECK: fcvtzs z0.d, p0/m, z1.s
135 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64> %a,
136 <vscale x 2 x i1> %pg,
137 <vscale x 4 x float> %b)
138 ret <vscale x 2 x i64> %out
;; FCVTZU: merging predicated FP -> unsigned integer conversions
;; (round toward zero). Mirrors the fcvtzs tests above, one per
;; source/result element-width combination.
; f16 -> u16, same element width (8 lanes).
145 define <vscale x 8 x i16> @fcvtzu_i16_f16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b) {
146 ; CHECK-LABEL: fcvtzu_i16_f16:
147 ; CHECK: fcvtzu z0.h, p0/m, z1.h
149 %out = call <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16> %a,
150 <vscale x 8 x i1> %pg,
151 <vscale x 8 x half> %b)
152 ret <vscale x 8 x i16> %out
; f32 -> u32, same element width (4 lanes).
155 define <vscale x 4 x i32> @fcvtzu_i32_f32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b) {
156 ; CHECK-LABEL: fcvtzu_i32_f32:
157 ; CHECK: fcvtzu z0.s, p0/m, z1.s
159 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32> %a,
160 <vscale x 4 x i1> %pg,
161 <vscale x 4 x float> %b)
162 ret <vscale x 4 x i32> %out
; f64 -> u64, same element width (2 lanes).
165 define <vscale x 2 x i64> @fcvtzu_i64_f64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
166 ; CHECK-LABEL: fcvtzu_i64_f64:
167 ; CHECK: fcvtzu z0.d, p0/m, z1.d
169 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64> %a,
170 <vscale x 2 x i1> %pg,
171 <vscale x 2 x double> %b)
172 ret <vscale x 2 x i64> %out
; f16 source, u32 result (mixed width).
175 define <vscale x 4 x i32> @fcvtzu_i32_f16(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 8 x half> %b) {
176 ; CHECK-LABEL: fcvtzu_i32_f16:
177 ; CHECK: fcvtzu z0.s, p0/m, z1.h
179 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32> %a,
180 <vscale x 4 x i1> %pg,
181 <vscale x 8 x half> %b)
182 ret <vscale x 4 x i32> %out
; f64 source, u32 result (mixed width).
185 define <vscale x 4 x i32> @fcvtzu_i32_f64(<vscale x 4 x i32> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b) {
186 ; CHECK-LABEL: fcvtzu_i32_f64:
187 ; CHECK: fcvtzu z0.s, p0/m, z1.d
189 %out = call <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32> %a,
190 <vscale x 2 x i1> %pg,
191 <vscale x 2 x double> %b)
192 ret <vscale x 4 x i32> %out
; f16 source, u64 result (mixed width).
195 define <vscale x 2 x i64> @fcvtzu_i64_f16(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 8 x half> %b) {
196 ; CHECK-LABEL: fcvtzu_i64_f16:
197 ; CHECK: fcvtzu z0.d, p0/m, z1.h
199 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64> %a,
200 <vscale x 2 x i1> %pg,
201 <vscale x 8 x half> %b)
202 ret <vscale x 2 x i64> %out
; f32 source, u64 result (mixed width).
205 define <vscale x 2 x i64> @fcvtzu_i64_f32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 4 x float> %b) {
206 ; CHECK-LABEL: fcvtzu_i64_f32:
207 ; CHECK: fcvtzu z0.d, p0/m, z1.s
209 %out = call <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64> %a,
210 <vscale x 2 x i1> %pg,
211 <vscale x 4 x float> %b)
212 ret <vscale x 2 x i64> %out
;; SCVTF: merging predicated signed integer -> FP conversions.
;; Operand order is (merge value, predicate, integer source); the result
;; element suffix comes from the FP type, the source suffix from the
;; integer type.
; i16 -> f16, same element width (8 lanes).
219 define <vscale x 8 x half> @scvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
220 ; CHECK-LABEL: scvtf_f16_i16:
221 ; CHECK: scvtf z0.h, p0/m, z1.h
223 %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
224 <vscale x 8 x i1> %pg,
225 <vscale x 8 x i16> %b)
226 ret <vscale x 8 x half> %out
; i32 -> f32, same element width (4 lanes).
229 define <vscale x 4 x float> @scvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
230 ; CHECK-LABEL: scvtf_f32_i32:
231 ; CHECK: scvtf z0.s, p0/m, z1.s
233 %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
234 <vscale x 4 x i1> %pg,
235 <vscale x 4 x i32> %b)
236 ret <vscale x 4 x float> %out
; i64 -> f64, same element width (2 lanes).
239 define <vscale x 2 x double> @scvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
240 ; CHECK-LABEL: scvtf_f64_i64:
241 ; CHECK: scvtf z0.d, p0/m, z1.d
243 %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
244 <vscale x 2 x i1> %pg,
245 <vscale x 2 x i64> %b)
246 ret <vscale x 2 x double> %out
; i32 source, f16 result (mixed width).
249 define <vscale x 8 x half> @scvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
250 ; CHECK-LABEL: scvtf_f16_i32:
251 ; CHECK: scvtf z0.h, p0/m, z1.s
253 %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half> %a,
254 <vscale x 4 x i1> %pg,
255 <vscale x 4 x i32> %b)
256 ret <vscale x 8 x half> %out
; i64 source, f16 result (mixed width).
259 define <vscale x 8 x half> @scvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
260 ; CHECK-LABEL: scvtf_f16_i64:
261 ; CHECK: scvtf z0.h, p0/m, z1.d
263 %out = call <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half> %a,
264 <vscale x 2 x i1> %pg,
265 <vscale x 2 x i64> %b)
266 ret <vscale x 8 x half> %out
; i64 source, f32 result (mixed width).
269 define <vscale x 4 x float> @scvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
270 ; CHECK-LABEL: scvtf_f32_i64:
271 ; CHECK: scvtf z0.s, p0/m, z1.d
273 %out = call <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float> %a,
274 <vscale x 2 x i1> %pg,
275 <vscale x 2 x i64> %b)
276 ret <vscale x 4 x float> %out
; i32 source, f64 result (mixed width).
279 define <vscale x 2 x double> @scvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
280 ; CHECK-LABEL: scvtf_f64_i32:
281 ; CHECK: scvtf z0.d, p0/m, z1.s
283 %out = call <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double> %a,
284 <vscale x 2 x i1> %pg,
285 <vscale x 4 x i32> %b)
286 ret <vscale x 2 x double> %out
;; UCVTF: merging predicated unsigned integer -> FP conversions.
;; Mirrors the scvtf tests above, one per source/result element-width
;; combination.
; u16 -> f16, same element width (8 lanes).
293 define <vscale x 8 x half> @ucvtf_f16_i16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
294 ; CHECK-LABEL: ucvtf_f16_i16:
295 ; CHECK: ucvtf z0.h, p0/m, z1.h
297 %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half> %a,
298 <vscale x 8 x i1> %pg,
299 <vscale x 8 x i16> %b)
300 ret <vscale x 8 x half> %out
; u32 -> f32, same element width (4 lanes).
303 define <vscale x 4 x float> @ucvtf_f32_i32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
304 ; CHECK-LABEL: ucvtf_f32_i32:
305 ; CHECK: ucvtf z0.s, p0/m, z1.s
307 %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float> %a,
308 <vscale x 4 x i1> %pg,
309 <vscale x 4 x i32> %b)
310 ret <vscale x 4 x float> %out
; u64 -> f64, same element width (2 lanes).
313 define <vscale x 2 x double> @ucvtf_f64_i64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
314 ; CHECK-LABEL: ucvtf_f64_i64:
315 ; CHECK: ucvtf z0.d, p0/m, z1.d
317 %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double> %a,
318 <vscale x 2 x i1> %pg,
319 <vscale x 2 x i64> %b)
320 ret <vscale x 2 x double> %out
; u32 source, f16 result (mixed width).
323 define <vscale x 8 x half> @ucvtf_f16_i32(<vscale x 8 x half> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
324 ; CHECK-LABEL: ucvtf_f16_i32:
325 ; CHECK: ucvtf z0.h, p0/m, z1.s
327 %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half> %a,
328 <vscale x 4 x i1> %pg,
329 <vscale x 4 x i32> %b)
330 ret <vscale x 8 x half> %out
; u64 source, f16 result (mixed width).
333 define <vscale x 8 x half> @ucvtf_f16_i64(<vscale x 8 x half> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
334 ; CHECK-LABEL: ucvtf_f16_i64:
335 ; CHECK: ucvtf z0.h, p0/m, z1.d
337 %out = call <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half> %a,
338 <vscale x 2 x i1> %pg,
339 <vscale x 2 x i64> %b)
340 ret <vscale x 8 x half> %out
; u64 source, f32 result (mixed width).
343 define <vscale x 4 x float> @ucvtf_f32_i64(<vscale x 4 x float> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
344 ; CHECK-LABEL: ucvtf_f32_i64:
345 ; CHECK: ucvtf z0.s, p0/m, z1.d
347 %out = call <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float> %a,
348 <vscale x 2 x i1> %pg,
349 <vscale x 2 x i64> %b)
350 ret <vscale x 4 x float> %out
; u32 source, f64 result (mixed width).
353 define <vscale x 2 x double> @ucvtf_f64_i32(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 4 x i32> %b) {
354 ; CHECK-LABEL: ucvtf_f64_i32:
355 ; CHECK: ucvtf z0.d, p0/m, z1.s
357 %out = call <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double> %a,
358 <vscale x 2 x i1> %pg,
359 <vscale x 4 x i32> %b)
360 ret <vscale x 2 x double> %out
;; Intrinsic declarations for the tests above. Every intrinsic takes
;; (merge value, governing predicate, source vector). Same-element-width
;; variants use the type-mangled nxv* names; mixed-width variants use the
;; compact suffix form (e.g. f16f32, i32f64).
; fcvt: FP -> FP conversions.
363 declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x float>)
364 declare <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x double>)
365 declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f16(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 8 x half>)
366 declare <vscale x 4 x float> @llvm.aarch64.sve.fcvt.f32f64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x double>)
367 declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f16(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 8 x half>)
368 declare <vscale x 2 x double> @llvm.aarch64.sve.fcvt.f64f32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x float>)
; fcvtzs: FP -> signed integer conversions.
370 declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzs.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
371 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
372 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
373 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
374 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzs.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
375 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
376 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzs.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)
; fcvtzu: FP -> unsigned integer conversions.
378 declare <vscale x 8 x i16> @llvm.aarch64.sve.fcvtzu.nxv8i16.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
379 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.nxv4i32.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
380 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.nxv2i64.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)
381 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f16(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 8 x half>)
382 declare <vscale x 4 x i32> @llvm.aarch64.sve.fcvtzu.i32f64(<vscale x 4 x i32>, <vscale x 2 x i1>, <vscale x 2 x double>)
383 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f16(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 8 x half>)
384 declare <vscale x 2 x i64> @llvm.aarch64.sve.fcvtzu.i64f32(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 4 x float>)
; scvtf: signed integer -> FP conversions.
386 declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
387 declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
388 declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
389 declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
390 declare <vscale x 8 x half> @llvm.aarch64.sve.scvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
391 declare <vscale x 4 x float> @llvm.aarch64.sve.scvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
392 declare <vscale x 2 x double> @llvm.aarch64.sve.scvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)
; ucvtf: unsigned integer -> FP conversions.
394 declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.nxv8f16.nxv8i16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x i16>)
395 declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x i32>)
396 declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x i64>)
397 declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i32(<vscale x 8 x half>, <vscale x 4 x i1>, <vscale x 4 x i32>)
398 declare <vscale x 8 x half> @llvm.aarch64.sve.ucvtf.f16i64(<vscale x 8 x half>, <vscale x 2 x i1>, <vscale x 2 x i64>)
399 declare <vscale x 4 x float> @llvm.aarch64.sve.ucvtf.f32i64(<vscale x 4 x float>, <vscale x 2 x i1>, <vscale x 2 x i64>)
400 declare <vscale x 2 x double> @llvm.aarch64.sve.ucvtf.f64i32(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 4 x i32>)