// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
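
// For example (expansion shown for illustration only): with SVE_OVERLOADED_FORMS
// defined, SVE_ACLE_FUNC(svlsl,_s8,_z,) pastes A1##A3 and expands to the
// overloaded call svlsl_z(pg, op1, op2); otherwise all four arguments are
// pasted and it expands to svlsl_s8_z(pg, op1, op2).
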
// CHECK-LABEL: @test_svlsl_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_s8_zu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svlsl_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svlsl_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svlsl_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s64_zu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svlsl_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_u8_zu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svlsl_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u16_zu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svlsl_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u32_zu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svlsl_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u64_zu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svlsl_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_s8_mu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s64_mu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svlsl_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_u8_mu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsl_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u16_mu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsl_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u32_mu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsl_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u64_mu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svlsl_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_s8_xu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_s64_xu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svlsl_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svlsl_u8_xu10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsl_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u16_xu10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsl_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u32_xu10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsl_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_u64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svlsl_u64_xu10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.lsl.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svlsl_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl,_u64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_s8_zu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svlsl_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svlsl_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svlsl_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_u8_zu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svlsl_wide_u8_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u16_zu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svlsl_wide_u16_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u32_zu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svlsl_wide_u32_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_s8_mu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_u8_mu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsl_wide_u8_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u16_mu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsl_wide_u16_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u32_mu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsl_wide_u32_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_s8_xu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svlsl_wide_u8_xu10__SVBool_tu11__SVUint8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svlsl_wide_u8_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u16_xu10__SVBool_tu12__SVUint16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svlsl_wide_u16_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svlsl_wide_u32_xu10__SVBool_tu12__SVUint32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svlsl_wide_u32_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_u32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsl_wide_n_s8_mu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s16_mu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s32_mu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsl_wide_n_s8_zu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svlsl_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s16_zu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svlsl_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s32_zu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svlsl_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svlsl_wide_n_s8_xu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.lsl.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svlsl_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s16_xu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.lsl.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svlsl_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svlsl_wide_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svlsl_wide_n_s32_xu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.lsl.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svlsl_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svlsl_wide,_n_s32,_x,)(pg, op1, op2);
}