; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
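
; These tests exercise the zeroing forms of the SVE shift intrinsics: the
; first shift operand has its inactive lanes zeroed via a select, the pattern
; the ACLE _z intrinsics (e.g. svasr_s8_z) lower to. With
; -mattr=+use-experimental-zeroing-pseudos the select should be folded into
; the shift, emitted as "movprfx zd, pg/z, zn" (which zeroes the inactive
; lanes of the destination) followed by the merging form of the shift.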

;
; ASR
;

define <vscale x 16 x i8> @asr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: asr_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: asr_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: asr_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}
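
; The wide forms shift each element by a 64-bit amount. As the checks below
; show, these are not folded into a zeroing pseudo: the zero-merge of the
; first operand is instead materialised with an explicit mov/sel pair ahead
; of the merging shift.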

define <vscale x 16 x i8> @asr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: asr_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; ASRD
;
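
; asrd is an arithmetic shift right for divide (rounding towards zero, i.e.
; signed division by a power of two). Its immediate ranges from 1 up to the
; element size in bits, so the #64 used for the .d form below is the maximum.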

define <vscale x 16 x i8> @asrd_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
; CHECK-LABEL: asrd_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    asrd z0.b, p0/m, z0.b, #1
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %a_z,
                                                                i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @asrd_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
; CHECK-LABEL: asrd_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    asrd z0.h, p0/m, z0.h, #2
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %a_z,
                                                                i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @asrd_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
; CHECK-LABEL: asrd_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    asrd z0.s, p0/m, z0.s, #31
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %a_z,
                                                                i32 31)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @asrd_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) {
; CHECK-LABEL: asrd_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    asrd z0.d, p0/m, z0.d, #64
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %a_z,
                                                                i32 64)
  ret <vscale x 2 x i64> %out
}

;
; LSL
;

define <vscale x 16 x i8> @lsl_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsl_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsl_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsl_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsl_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @lsl_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsl_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsl_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsl_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

;
; LSR
;

define <vscale x 16 x i8> @lsr_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: lsr_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.b, p0/z, z0.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %a_z,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: lsr_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %a_z,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: lsr_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.s, p0/z, z0.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %a_z,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @lsr_i64_zero(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_i64_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.d, p0/z, z0.d
; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> zeroinitializer
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %a_z,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

define <vscale x 16 x i8> @lsr_wide_i8_zero(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i8_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.b, #0 // =0x0
; CHECK-NEXT:    sel z0.b, p0, z0.b, z2.b
; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                                                    <vscale x 16 x i8> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @lsr_wide_i16_zero(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i16_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> zeroinitializer
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> %pg,
                                                                    <vscale x 8 x i16> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @lsr_wide_i32_zero(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: lsr_wide_i32_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.s, #0 // =0x0
; CHECK-NEXT:    sel z0.s, p0, z0.s, z2.s
; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.d
; CHECK-NEXT:    ret
  %a_z = select <vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> zeroinitializer
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> %pg,
                                                                    <vscale x 4 x i32> %a_z,
                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.asrd.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.asrd.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.asrd.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)