// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
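
// Illustrative expansion (assumed, not part of the generated checks): with
// SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svasr,_s8,_z,) collapses to the
// overloaded name svasr_z; otherwise it expands to the fully suffixed name
// svasr_s8_z, so each call below exercises both spellings of the intrinsic.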

// CHECK-LABEL: @test_svasr_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_zu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_zu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_mu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_mu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_xu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_xu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_zu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_n_s64_z(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_mu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_m(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_xu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_x(svbool_t pg, svint64_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_zu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_mu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_xu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_zu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_n_s8_z(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_zu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_n_s16_z(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_zu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_n_s32_z(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_mu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_m(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_mu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_m(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_mu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_m(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_xu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_x(svbool_t pg, svint8_t op1, uint8_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_xu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_x(svbool_t pg, svint16_t op1, uint16_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_xu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_x(svbool_t pg, svint32_t op1, uint32_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_mu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_mu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_mu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_zu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_zu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_zu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_xu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_xu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_xu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2) MODE_ATTR
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_x,)(pg, op1, op2);
}