// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
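
// Illustration only (not part of the checked output): with SVE_OVERLOADED_FORMS
// defined, the macro pastes only its first and third arguments, so
// SVE_ACLE_FUNC(svasr,_s8,_z,) expands to the overloaded name svasr_z; without
// it, all four arguments are pasted, giving the fully-suffixed name svasr_s8_z.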

// CHECK-LABEL: @test_svasr_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_zu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_s8_z(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_s16_z(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_s32_z(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_zu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_s64_z(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_mu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_m(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_m(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_m(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_mu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_m(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svasr_s8_xu10__SVBool_tu10__SVInt8_tu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_s8_x(svbool_t pg, svint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_s16_x(svbool_t pg, svint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_s32_x(svbool_t pg, svint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svasr_s64_xu10__SVBool_tu11__SVInt64_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_s64_x(svbool_t pg, svint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_zu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svasr_n_s64_z(svbool_t pg, svint64_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_mu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_m(svbool_t pg, svint64_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s64_xu10__SVBool_tu11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.asr.u.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svasr_n_s64_x(svbool_t pg, svint64_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_zu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_s8_z(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_zu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_s16_z(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_zu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_s32_z(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_mu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_m(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_mu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_m(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_mu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_m(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z20test_svasr_wide_s8_xu10__SVBool_tu10__SVInt8_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_s8_x(svbool_t pg, svint8_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s16_xu10__SVBool_tu11__SVInt16_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_s16_x(svbool_t pg, svint16_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z21test_svasr_wide_s32_xu10__SVBool_tu11__SVInt32_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_s32_x(svbool_t pg, svint32_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_zu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_n_s8_z(svbool_t pg, svint8_t op1, uint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_zu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_n_s16_z(svbool_t pg, svint16_t op1, uint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_zu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_n_s32_z(svbool_t pg, svint32_t op1, uint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_mu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_m(svbool_t pg, svint8_t op1, uint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_mu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_m(svbool_t pg, svint16_t op1, uint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_mu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_m(svbool_t pg, svint32_t op1, uint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svasr_n_s8_xu10__SVBool_tu10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.u.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_n_s8_x(svbool_t pg, svint8_t op1, uint8_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s16_xu10__SVBool_tu11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.u.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_n_s16_x(svbool_t pg, svint16_t op1, uint16_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svasr_n_s32_xu10__SVBool_tu11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.u.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_n_s32_x(svbool_t pg, svint32_t op1, uint32_t op2)
{
  return SVE_ACLE_FUNC(svasr,_n_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_mu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_m(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_mu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_m(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_mu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_m(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_zu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = select <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svasr_wide_n_s8_z(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_zu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svasr_wide_n_s16_z(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_zu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = select <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svasr_wide_n_s32_z(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z22test_svasr_wide_n_s8_xu10__SVBool_tu10__SVInt8_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.asr.wide.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svasr_wide_n_s8_x(svbool_t pg, svint8_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s16_xu10__SVBool_tu11__SVInt16_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.asr.wide.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svasr_wide_n_s16_x(svbool_t pg, svint16_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svasr_wide_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z23test_svasr_wide_n_s32_xu10__SVBool_tu11__SVInt32_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.asr.wide.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svasr_wide_n_s32_x(svbool_t pg, svint32_t op1, uint64_t op2)
{
  return SVE_ACLE_FUNC(svasr_wide,_n_s32,_x,)(pg, op1, op2);
}