; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
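
; MOVPRFX is a prefix instruction that copies a source vector register into
; the destination register, giving the destructive (merging) instruction it
; prefixes a non-destructive form that can write a register other than its
; passthru operand. The tests below exercise when it may be emitted: when the
; passthru is absent, undef, or dead (all-active predicate), and when it must
; not be (passthru live, or destination already equal to the source).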
;
; ABS (sve_int_un_pred_arit_0)
;

; Check movprfx is inserted when no passthru/predicate is present
define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: abs_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %b, i1 0)
  ret <vscale x 16 x i8> %ret
}

; Check movprfx is not inserted when dstReg == srcReg
define <vscale x 16 x i8> @abs_i8_dupreg(<vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: abs_i8_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    abs z0.b, p0/m, z0.b
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %a, i1 0)
  ret <vscale x 16 x i8> %ret
}

; Check movprfx is inserted when passthru is undef
define <vscale x 16 x i8> @abs_i8_undef(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: abs_i8_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

; Check movprfx is inserted when predicate is all active, making the passthru dead
define <vscale x 16 x i8> @abs_i8_active(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: abs_i8_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

; Check movprfx is not inserted when predicate is not all active, making the passthru used
define <vscale x 16 x i8> @abs_i8_not_active(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: abs_i8_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg.to, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: abs_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %b, i1 0)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @abs_i16_dupreg(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: abs_i16_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    abs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %a, i1 0)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @abs_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: abs_i16_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @abs_i16_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: abs_i16_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @abs_i16_not_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: abs_i16_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg.from, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 4 x i32> @abs_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: abs_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %b, i1 0)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @abs_i32_dupreg(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: abs_i32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    abs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %a, i1 0)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @abs_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: abs_i32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @abs_i32_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: abs_i32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @abs_i32_not_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: abs_i32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 2 x i64> @abs_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: abs_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %b, i1 0)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @abs_i64_dupreg(<vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: abs_i64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    abs z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %a, i1 0)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @abs_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: abs_i64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @abs_i64_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: abs_i64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @abs_i64_not_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: abs_i64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

;
; CLS (sve_int_un_pred_arit_1)
;

define <vscale x 16 x i8> @cls_i8_dupreg(<vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: cls_i8_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    cls z0.b, p0/m, z0.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 16 x i8> @cls_i8_undef(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: cls_i8_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 16 x i8> @cls_i8_active(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: cls_i8_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 16 x i8> @cls_i8_not_active(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
; CHECK-LABEL: cls_i8_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cls z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %ret = tail call <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg.to, <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %ret
}

define <vscale x 8 x i16> @cls_i16_dupreg(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: cls_i16_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    cls z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @cls_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: cls_i16_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @cls_i16_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: cls_i16_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @cls_i16_not_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: cls_i16_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cls z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg.from, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 4 x i32> @cls_i32_dupreg(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: cls_i32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    cls z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @cls_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: cls_i32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @cls_i32_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: cls_i32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @cls_i32_not_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: cls_i32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cls z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 2 x i64> @cls_i64_dupreg(<vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: cls_i64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    cls z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @cls_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: cls_i64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @cls_i64_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: cls_i64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    cls z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @cls_i64_not_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: cls_i64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cls z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

;
; FABS (sve_int_un_pred_arit_1_fp)
;

define <vscale x 8 x half> @fabs_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fabs_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fabs_f16_dupreg(<vscale x 8 x half> %a) #0 {
; CHECK-LABEL: fabs_f16_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fabs z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fabs_f16_undef(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fabs_f16_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fabs_f16_active(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fabs_f16_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fabs_f16_not_active(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fabs_f16_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg.from, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 4 x float> @fabs_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fabs_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fabs_f32_dupreg(<vscale x 4 x float> %a) #0 {
; CHECK-LABEL: fabs_f32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fabs z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fabs_f32_undef(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fabs_f32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fabs_f32_active(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fabs_f32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fabs_f32_not_active(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fabs_f32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 2 x double> @fabs_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fabs_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fabs_f64_dupreg(<vscale x 2 x double> %a) #0 {
; CHECK-LABEL: fabs_f64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fabs z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fabs_f64_undef(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fabs_f64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fabs_f64_active(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fabs_f64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fabs_f64_not_active(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: fabs_f64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

;
; FSQRT (sve_fp_2op_p_zd_HSD)
;

define <vscale x 8 x half> @fsqrt_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fsqrt_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fsqrt_f16_dupreg(<vscale x 8 x half> %a) #0 {
; CHECK-LABEL: fsqrt_f16_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    fsqrt z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fsqrt_f16_undef(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fsqrt_f16_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fsqrt_f16_active(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fsqrt_f16_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 8 x half> @fsqrt_f16_not_active(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
; CHECK-LABEL: fsqrt_f16_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %pg.from, <vscale x 8 x half> %b)
  ret <vscale x 8 x half> %ret
}

define <vscale x 4 x float> @fsqrt_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fsqrt_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fsqrt_f32_dupreg(<vscale x 4 x float> %a) #0 {
; CHECK-LABEL: fsqrt_f32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    fsqrt z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fsqrt_f32_undef(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fsqrt_f32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fsqrt_f32_active(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fsqrt_f32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 4 x float> @fsqrt_f32_not_active(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
; CHECK-LABEL: fsqrt_f32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x float> %b)
  ret <vscale x 4 x float> %ret
}

define <vscale x 2 x double> @fsqrt_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fsqrt_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fsqrt_f64_dupreg(<vscale x 2 x double> %a) #0 {
; CHECK-LABEL: fsqrt_f64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    fsqrt z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fsqrt_f64_undef(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fsqrt_f64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fsqrt_f64_active(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK-LABEL: fsqrt_f64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

define <vscale x 2 x double> @fsqrt_f64_not_active(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: fsqrt_f64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x i1> %pg, <vscale x 2 x double> %b)
  ret <vscale x 2 x double> %ret
}

;
; SXTB (sve_int_un_pred_arit_0_h)
;

define <vscale x 8 x i16> @sxtb_i16(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
; CHECK-LABEL: sxtb_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %ret = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @sxtb_i16_dupreg(<vscale x 8 x i8> %a) #0 {
; CHECK-LABEL: sxtb_i16_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
; CHECK-NEXT:    ret
  %ret = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @sxtb_i16_undef(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: sxtb_i16_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @sxtb_i16_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: sxtb_i16_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 8 x i16> @sxtb_i16_not_active(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
; CHECK-LABEL: sxtb_i16_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg.from, <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %ret
}

define <vscale x 4 x i32> @sxtb_i32(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
; CHECK-LABEL: sxtb_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %ret = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxtb_i32_dupreg(<vscale x 4 x i8> %a) #0 {
; CHECK-LABEL: sxtb_i32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %ret = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxtb_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxtb_i32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxtb_i32_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxtb_i32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxtb_i32_not_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxtb_i32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 2 x i64> @sxtb_i64(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
; CHECK-LABEL: sxtb_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i8> %b to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtb_i64_dupreg(<vscale x 2 x i8> %a) #0 {
; CHECK-LABEL: sxtb_i64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtb_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxtb_i64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtb_i64_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxtb_i64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtb_i64_not_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: sxtb_i64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

;
; SXTH (sve_int_un_pred_arit_0_w)
;

define <vscale x 4 x i32> @sxth_i32(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
; CHECK-LABEL: sxth_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %ret = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxth_i32_dupreg(<vscale x 4 x i16> %a) #0 {
; CHECK-LABEL: sxth_i32_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
; CHECK-NEXT:    ret
  %ret = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxth_i32_undef(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxth_i32_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxth_i32_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxth_i32_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 4 x i32> @sxth_i32_not_active(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
; CHECK-LABEL: sxth_i32_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxth z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %pg.to = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %pg)
  %pg.from = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg.to)
  %ret = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg.from, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %ret
}

define <vscale x 2 x i64> @sxth_i64(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
; CHECK-LABEL: sxth_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i16> %b to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxth_i64_dupreg(<vscale x 2 x i16> %a) #0 {
; CHECK-LABEL: sxth_i64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxth_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxth_i64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxth_i64_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxth_i64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxth_i64_not_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: sxth_i64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxth z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

;
; SXTW (sve_int_un_pred_arit_0_d)
;

define <vscale x 2 x i64> @sxtw_i64(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
; CHECK-LABEL: sxtw_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtw_i64_dupreg(<vscale x 2 x i32> %a) #0 {
; CHECK-LABEL: sxtw_i64_dupreg:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
; CHECK-NEXT:    ret
  %ret = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtw_i64_undef(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxtw_i64_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtw_i64_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
; CHECK-LABEL: sxtw_i64_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    movprfx z0, z1
; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %pg = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

define <vscale x 2 x i64> @sxtw_i64_not_active(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %pg) #0 {
; CHECK-LABEL: sxtw_i64_not_active:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sxtw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %ret = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %ret
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)

declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)

declare <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)

declare <vscale x 16 x i8> @llvm.aarch64.sve.cls.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.cls.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.cls.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.cls.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fabs.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fabs.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fabs.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.aarch64.sve.fsqrt.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.aarch64.sve.fsqrt.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsqrt.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>)

declare <vscale x 8 x half> @llvm.sqrt.nxv8f16(<vscale x 8 x half>)
declare <vscale x 4 x float> @llvm.sqrt.nxv4f32(<vscale x 4 x float>)
declare <vscale x 2 x double> @llvm.sqrt.nxv2f64(<vscale x 2 x double>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.sxtb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sxtb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.sxth.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sxth.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.sxtw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

attributes #0 = { nounwind "target-features"="+sve" }