; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -verify-machineinstrs < %s | FileCheck %s
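
; A note on the expected code below: the two-vector forms of SQRSHRN, UQRSHRN
; and SQRSHRUN operate on a consecutive (even-aligned) register pair. The
; first argument of each test is deliberately unused so that %zn1/%zn2 arrive
; in z1/z2, forcing the compiler to copy them into the adjacent pair z2/z3
; before issuing the instruction.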

;
; S/UQRSHRN x2
;

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqrshrn z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x i16> @multi_vector_sat_shift_narrow_interleave_x2_u16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_narrow_interleave_x2_u16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    uqrshrn z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}

;
; SQRSHRUN x2
;

define <vscale x 8 x i16> @multi_vector_sat_shift_unsigned_narrow_interleave_x2_s16(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_sat_shift_unsigned_narrow_interleave_x2_s16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqrshrun z0.h, { z2.s, z3.s }, #16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x2.nxv8i16(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, i32 16)
  ret <vscale x 8 x i16> %res
}
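
; Declarations for the intrinsics exercised above: each takes the two wide
; source vectors plus an immediate shift amount, and (as the test names
; suggest) the narrowed results of the pair are interleaved into the
; <vscale x 8 x i16> return value.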
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrn.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqrshrn.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)

declare <vscale x 8 x i16> @llvm.aarch64.sve.sqrshrun.x2.nxv8i16(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)