// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
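
// Illustrative note (not part of the autogenerated FileCheck assertions):
// with SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svlsr,_u8,_z,) pastes only
// the first and third arguments and expands to the overloaded name svlsr_z;
// otherwise all four arguments are pasted, giving the suffixed name svlsr_u8_z.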

// CHECK-LABEL: @test_svlsr_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsr_u8_zu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svlsr_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u16_zu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svlsr_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u32_zu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svlsr_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u64_zu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svlsr_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsr_u8_mu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u16_mu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u32_mu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u64_mu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svlsr_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsr_u8_xu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u16_xu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u32_xu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_u64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsr_u64_xu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svlsr_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr,_u64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsr_wide_u8_zu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svlsr_wide_u8_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u16_zu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svlsr_wide_u16_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u32_zu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svlsr_wide_u32_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsr_wide_u8_mu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_wide_u8_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u16_mu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_wide_u16_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u32_mu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_wide_u32_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsr_wide_u8_xu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_wide_u8_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u16_xu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_wide_u16_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsr_wide_u32_xu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_wide_u32_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_u32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsr_wide_n_u8_mu10__SVBool_tu11__SVUint8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_wide_n_u8_m(svbool_t pg, svuint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u16_mu10__SVBool_tu12__SVUint16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_wide_n_u16_m(svbool_t pg, svuint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u32_mu10__SVBool_tu12__SVUint32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_wide_n_u32_m(svbool_t pg, svuint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsr_wide_n_u8_zu10__SVBool_tu11__SVUint8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svlsr_wide_n_u8_z(svbool_t pg, svuint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u16_zu10__SVBool_tu12__SVUint16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svlsr_wide_n_u16_z(svbool_t pg, svuint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u32_zu10__SVBool_tu12__SVUint32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svlsr_wide_n_u32_z(svbool_t pg, svuint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsr_wide_n_u8_xu10__SVBool_tu11__SVUint8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsr_wide_n_u8_x(svbool_t pg, svuint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u16_xu10__SVBool_tu12__SVUint16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsr_wide_n_u16_x(svbool_t pg, svuint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsr_wide_n_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsr_wide_n_u32_xu10__SVBool_tu12__SVUint32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsr_wide_n_u32_x(svbool_t pg, svuint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsr_wide,_n_u32,_x,)(pg, op1, op2);
}