; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
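
;
; SHRNB (shift right narrow, bottom)
;
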
define <vscale x 16 x i8> @shrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: shrnb_h:
; CHECK: shrnb z0.b, z0.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                 i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @shrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: shrnb_s:
; CHECK: shrnb z0.h, z0.s, #16
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                 i32 16)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @shrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: shrnb_d:
; CHECK: shrnb z0.s, z0.d, #32
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                 i32 32)
  ret <vscale x 4 x i32> %out
}
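
;
; UQSHRNB (unsigned saturating shift right narrow, bottom)
;
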
define <vscale x 16 x i8> @uqshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqshrnb_h:
; CHECK: uqshrnb z0.b, z0.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqshrnb_s:
; CHECK: uqshrnb z0.h, z0.s, #1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i32 1)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqshrnb_d:
; CHECK: uqshrnb z0.s, z0.d, #1
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i32 1)
  ret <vscale x 4 x i32> %out
}
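
;
; SQSHRNB (signed saturating shift right narrow, bottom)
;
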
define <vscale x 16 x i8> @sqshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqshrnb_h:
; CHECK: sqshrnb z0.b, z0.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                   i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqshrnb_s:
; CHECK: sqshrnb z0.h, z0.s, #1
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                   i32 1)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqshrnb_d:
; CHECK: sqshrnb z0.s, z0.d, #1
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                   i32 1)
  ret <vscale x 4 x i32> %out
}
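
;
; SQSHRUNB (signed saturating shift right unsigned narrow, bottom)
;
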
define <vscale x 16 x i8> @sqshrunb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqshrunb_h:
; CHECK: sqshrunb z0.b, z0.h, #7
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 7)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrunb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqshrunb_s:
; CHECK: sqshrunb z0.h, z0.s, #15
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 15)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrunb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqshrunb_d:
; CHECK: sqshrunb z0.s, z0.d, #31
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 31)
  ret <vscale x 4 x i32> %out
}
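
;
; UQRSHRNB (unsigned saturating rounding shift right narrow, bottom)
;
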
define <vscale x 16 x i8> @uqrshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: uqrshrnb_h:
; CHECK: uqrshrnb z0.b, z0.h, #2
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 2)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqrshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: uqrshrnb_s:
; CHECK: uqrshrnb z0.h, z0.s, #2
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqrshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: uqrshrnb_d:
; CHECK: uqrshrnb z0.s, z0.d, #2
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 2)
  ret <vscale x 4 x i32> %out
}
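
;
; SQRSHRNB (signed saturating rounding shift right narrow, bottom)
;
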
define <vscale x 16 x i8> @sqrshrnb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqrshrnb_h:
; CHECK: sqrshrnb z0.b, z0.h, #2
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnb.nxv8i16(<vscale x 8 x i16> %a,
                                                                    i32 2)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrnb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqrshrnb_s:
; CHECK: sqrshrnb z0.h, z0.s, #2
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnb.nxv4i32(<vscale x 4 x i32> %a,
                                                                    i32 2)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrnb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqrshrnb_d:
; CHECK: sqrshrnb z0.s, z0.d, #2
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnb.nxv2i64(<vscale x 2 x i64> %a,
                                                                    i32 2)
  ret <vscale x 4 x i32> %out
}
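
;
; SQRSHRUNB (signed saturating rounding shift right unsigned narrow, bottom)
;
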
define <vscale x 16 x i8> @sqrshrunb_h(<vscale x 8 x i16> %a) {
; CHECK-LABEL: sqrshrunb_h:
; CHECK: sqrshrunb z0.b, z0.h, #6
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunb.nxv8i16(<vscale x 8 x i16> %a,
                                                                     i32 6)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrunb_s(<vscale x 4 x i32> %a) {
; CHECK-LABEL: sqrshrunb_s:
; CHECK: sqrshrunb z0.h, z0.s, #14
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunb.nxv4i32(<vscale x 4 x i32> %a,
                                                                     i32 14)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrunb_d(<vscale x 2 x i64> %a) {
; CHECK-LABEL: sqrshrunb_d:
; CHECK: sqrshrunb z0.s, z0.d, #30
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunb.nxv2i64(<vscale x 2 x i64> %a,
                                                                     i32 30)
  ret <vscale x 4 x i32> %out
}
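
;
; SHRNT (shift right narrow, top)
;
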
define <vscale x 16 x i8> @shrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: shrnt_h:
; CHECK: shrnt z0.b, z1.h, #3
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.shrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                 <vscale x 8 x i16> %b,
                                                                 i32 3)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @shrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: shrnt_s:
; CHECK: shrnt z0.h, z1.s, #3
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                 <vscale x 4 x i32> %b,
                                                                 i32 3)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @shrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: shrnt_d:
; CHECK: shrnt z0.s, z1.d, #3
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.shrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                 <vscale x 2 x i64> %b,
                                                                 i32 3)
  ret <vscale x 4 x i32> %out
}
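
;
; UQSHRNT (unsigned saturating shift right narrow, top)
;
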
define <vscale x 16 x i8> @uqshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqshrnt_h:
; CHECK: uqshrnt z0.b, z1.h, #5
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   i32 5)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqshrnt_s:
; CHECK: uqshrnt z0.h, z1.s, #13
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   i32 13)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqshrnt_d:
; CHECK: uqshrnt z0.s, z1.d, #29
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   i32 29)
  ret <vscale x 4 x i32> %out
}
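
;
; SQSHRNT (signed saturating shift right narrow, top)
;
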
define <vscale x 16 x i8> @sqshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqshrnt_h:
; CHECK: sqshrnt z0.b, z1.h, #5
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                   <vscale x 8 x i16> %b,
                                                                   i32 5)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqshrnt_s:
; CHECK: sqshrnt z0.h, z1.s, #13
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                   <vscale x 4 x i32> %b,
                                                                   i32 13)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqshrnt_d:
; CHECK: sqshrnt z0.s, z1.d, #29
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                   <vscale x 2 x i64> %b,
                                                                   i32 29)
  ret <vscale x 4 x i32> %out
}
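
;
; SQSHRUNT (signed saturating shift right unsigned narrow, top)
;
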
define <vscale x 16 x i8> @sqshrunt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqshrunt_h:
; CHECK: sqshrunt z0.b, z1.h, #4
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 4)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqshrunt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqshrunt_s:
; CHECK: sqshrunt z0.h, z1.s, #4
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 4)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqshrunt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqshrunt_d:
; CHECK: sqshrunt z0.s, z1.d, #4
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 4)
  ret <vscale x 4 x i32> %out
}
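
;
; UQRSHRNT (unsigned saturating rounding shift right narrow, top)
;
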
define <vscale x 16 x i8> @uqrshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqrshrnt_h:
; CHECK: uqrshrnt z0.b, z1.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqrshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqrshrnt_s:
; CHECK: uqrshrnt z0.h, z1.s, #12
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 12)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqrshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqrshrnt_d:
; CHECK: uqrshrnt z0.s, z1.d, #28
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 28)
  ret <vscale x 4 x i32> %out
}
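
;
; SQRSHRNT (signed saturating rounding shift right narrow, top)
;
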
define <vscale x 16 x i8> @sqrshrnt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqrshrnt_h:
; CHECK: sqrshrnt z0.b, z1.h, #8
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnt.nxv8i16(<vscale x 16 x i8> %a,
                                                                    <vscale x 8 x i16> %b,
                                                                    i32 8)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrnt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqrshrnt_s:
; CHECK: sqrshrnt z0.h, z1.s, #12
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnt.nxv4i32(<vscale x 8 x i16> %a,
                                                                    <vscale x 4 x i32> %b,
                                                                    i32 12)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrnt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqrshrnt_d:
; CHECK: sqrshrnt z0.s, z1.d, #28
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnt.nxv2i64(<vscale x 4 x i32> %a,
                                                                    <vscale x 2 x i64> %b,
                                                                    i32 28)
  ret <vscale x 4 x i32> %out
}
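
;
; SQRSHRUNT (signed saturating rounding shift right unsigned narrow, top)
;
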
define <vscale x 16 x i8> @sqrshrunt_h(<vscale x 16 x i8> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqrshrunt_h:
; CHECK: sqrshrunt z0.b, z1.h, #1
; CHECK-NEXT: ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunt.nxv8i16(<vscale x 16 x i8> %a,
                                                                     <vscale x 8 x i16> %b,
                                                                     i32 1)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqrshrunt_s(<vscale x 8 x i16> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqrshrunt_s:
; CHECK: sqrshrunt z0.h, z1.s, #5
; CHECK-NEXT: ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunt.nxv4i32(<vscale x 8 x i16> %a,
                                                                     <vscale x 4 x i32> %b,
                                                                     i32 5)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqrshrunt_d(<vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqrshrunt_d:
; CHECK: sqrshrunt z0.s, z1.d, #5
; CHECK-NEXT: ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunt.nxv2i64(<vscale x 4 x i32> %a,
                                                                     <vscale x 2 x i64> %b,
                                                                     i32 5)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.shrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.shrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunb.nxv8i16(<vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunb.nxv4i32(<vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunb.nxv2i64(<vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.shrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.shrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.shrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqshrunt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqshrunt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqshrunt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqrshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqrshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrnt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrnt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrnt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqrshrunt.nxv8i16(<vscale x 16 x i8>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrunt.nxv4i32(<vscale x 8 x i16>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqrshrunt.nxv2i64(<vscale x 4 x i32>, <vscale x 2 x i64>, i32)