; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
define <vscale x 16 x i1> @facgt_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: facgt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @facge_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: facge_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilege_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilege p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilegt_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilegt p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilehi_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehi p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilehs_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehs p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilele_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilele p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilelo_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelo p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilels_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilels p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @whilelt_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelt p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}
define <vscale x 16 x i1> @cmpeq_d_fun(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_d_fun:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpeq_wide_s_fun(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_wide_s_fun:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpge_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpgt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmphi_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmphs_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmple_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmple_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmplo_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplo_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpls_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpls_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmplt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg,
                                                              <vscale x 2 x i64> %a,
                                                              <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @cmpne_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                   <vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @fcmeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmeq_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x double> %a,
                                                               <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @fcmgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmgt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x double> %a,
                                                               <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @fcmne_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmne_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x double> %a,
                                                               <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @fcmuo_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmuo_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x double> %a,
                                                               <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @match_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: match_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    match p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1> %pg,
                                                              <vscale x 8 x i16> %a,
                                                              <vscale x 8 x i16> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  ret <vscale x 16 x i1> %out
}
define <vscale x 16 x i1> @nmatch_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: nmatch_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    nmatch p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a,
                                                               <vscale x 8 x i16> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  ret <vscale x 16 x i1> %out
}
declare <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)