Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / AArch64 / sve-intrinsics / acle_sve_dup.c
blobfec77ceb463ffe2a72d222b1785e331ec9672416
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -o /dev/null %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -o /dev/null %s
#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif
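// MODE_ATTR marks each test function as __arm_streaming when __ARM_FEATURE_SME
// is defined, so that the +sme RUN line above also exercises these builtins
// from streaming code.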
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
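// For example, SVE_ACLE_FUNC(svdup,_n,_s8,) expands to svdup_n_s8 by default,
// and to the overloaded name svdup_s8 when SVE_OVERLOADED_FORMS is defined.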
// CHECK-LABEL: @test_svdup_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z15test_svdup_n_s8a(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdup_n_s8(int8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
}

// CHECK-LABEL: @test_svdup_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s16s(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svint16_t test_svdup_n_s16(int16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
}

// CHECK-LABEL: @test_svdup_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s32i(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svint32_t test_svdup_n_s32(int32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
}

// CHECK-LABEL: @test_svdup_n_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s64l(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svint64_t test_svdup_n_s64(int64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
}

// CHECK-LABEL: @test_svdup_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z15test_svdup_n_u8h(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdup_n_u8(uint8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
}

// CHECK-LABEL: @test_svdup_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u16t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svuint16_t test_svdup_n_u16(uint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u16,)(op);
}

// CHECK-LABEL: @test_svdup_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u32j(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svuint32_t test_svdup_n_u32(uint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
}

// CHECK-LABEL: @test_svdup_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u64m(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svuint64_t test_svdup_n_u64(uint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
}

// CHECK-LABEL: @test_svdup_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x half> [[DOTSPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f16Dh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x half> [[DOTSPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
svfloat16_t test_svdup_n_f16(float16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f16,)(op);
}

// CHECK-LABEL: @test_svdup_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f32f(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
svfloat32_t test_svdup_n_f32(float32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f32,)(op);
}

// CHECK-LABEL: @test_svdup_n_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x double> [[DOTSPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f64d(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x double> [[DOTSPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
svfloat64_t test_svdup_n_f64(float64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f64,)(op);
}

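// The predicated forms below lower to @llvm.aarch64.sve.dup: the _z variants
// pass a zeroinitializer passthru, _m passes the inactive operand, and _x
// passes undef. For elements wider than 8 bits the governing predicate is
// first narrowed with convert.from.svbool, as the checks show.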
// CHECK-LABEL: @test_svdup_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_zu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_zu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_zu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_zu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_zu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_zu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_zu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_zu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_zu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
svfloat16_t test_svdup_n_f16_z(svbool_t pg, float16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_zu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
svfloat32_t test_svdup_n_f32_z(svbool_t pg, float32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_zu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
svfloat64_t test_svdup_n_f64_z(svbool_t pg, float64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_mu10__SVInt8_tu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, int8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_mu11__SVInt16_tu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t pg, int16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_mu11__SVInt32_tu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t pg, int32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_mu11__SVInt64_tu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t pg, int64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_mu11__SVUint8_tu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t pg, uint8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_mu12__SVUint16_tu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svuint16_t test_svdup_n_u16_m(svuint16_t inactive, svbool_t pg, uint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_mu12__SVUint32_tu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svuint32_t test_svdup_n_u32_m(svuint32_t inactive, svbool_t pg, uint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_mu12__SVUint64_tu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svuint64_t test_svdup_n_u64_m(svuint64_t inactive, svbool_t pg, uint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_mu13__SVFloat16_tu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
svfloat16_t test_svdup_n_f16_m(svfloat16_t inactive, svbool_t pg, float16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_mu13__SVFloat32_tu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
svfloat32_t test_svdup_n_f32_m(svfloat32_t inactive, svbool_t pg, float32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_mu13__SVFloat64_tu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
svfloat64_t test_svdup_n_f64_m(svfloat64_t inactive, svbool_t pg, float64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_xu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdup_n_s8_x(svbool_t pg, int8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_xu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svint16_t test_svdup_n_s16_x(svbool_t pg, int16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_xu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svint32_t test_svdup_n_s32_x(svbool_t pg, int32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_xu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svint64_t test_svdup_n_s64_x(svbool_t pg, int64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_xu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdup_n_u8_x(svbool_t pg, uint8_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_xu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
svuint16_t test_svdup_n_u16_x(svbool_t pg, uint16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_xu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
svuint32_t test_svdup_n_u32_x(svbool_t pg, uint32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_xu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
svuint64_t test_svdup_n_u64_x(svbool_t pg, uint64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_xu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
svfloat16_t test_svdup_n_f16_x(svbool_t pg, float16_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_xu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
svfloat32_t test_svdup_n_f32_x(svbool_t pg, float32_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_xu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
svfloat64_t test_svdup_n_f64_x(svbool_t pg, float64_t op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_x,)(pg, op);
}

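// svdup_lane splats the selected element by lowering to a TBL lookup whose
// index vector is the lane number broadcast across every lane, as the checks
// below show.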
// CHECK-LABEL: @test_svdup_lane_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z18test_svdup_lane_s8u10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdup_lane_s8(svint8_t data, uint8_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_s8,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s16u11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svint16_t test_svdup_lane_s16(svint16_t data, uint16_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_s16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s32u11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svint32_t test_svdup_lane_s32(svint32_t data, uint32_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_s32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s64u11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svint64_t test_svdup_lane_s64(svint64_t data, uint64_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_s64,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
// CPP-CHECK-LABEL: @_Z18test_svdup_lane_u8u11__SVUint8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdup_lane_u8(svuint8_t data, uint8_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_u8,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u16u12__SVUint16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svuint16_t test_svdup_lane_u16(svuint16_t data, uint16_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_u16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u32u12__SVUint32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svuint32_t test_svdup_lane_u32(svuint32_t data, uint32_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_u32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u64u12__SVUint64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svuint64_t test_svdup_lane_u64(svuint64_t data, uint64_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_u64,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f16u13__SVFloat16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
svfloat16_t test_svdup_lane_f16(svfloat16_t data, uint16_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_f16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f32u13__SVFloat32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
svfloat32_t test_svdup_lane_f32(svfloat32_t data, uint32_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_f32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f64u13__SVFloat64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
svfloat64_t test_svdup_lane_f64(svfloat64_t data, uint64_t index) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup_lane,_f64,,)(data, index);
}

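// The svdup_n_b* forms splat an i1: b8 produces a <vscale x 16 x i1> directly,
// while b16/b32/b64 splat a narrower predicate and widen it back to svbool_t
// via convert.to.svbool.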
// CHECK-LABEL: @test_svdup_n_b8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i1> [[DOTSPLATINSERT]], <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i1> [[DOTSPLAT]]
// CPP-CHECK-LABEL: @_Z15test_svdup_n_b8b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i1> [[DOTSPLATINSERT]], <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[DOTSPLAT]]
svbool_t test_svdup_n_b8(bool op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_b8,)(op);
}

// CHECK-LABEL: @test_svdup_n_b16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[DOTSPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b16b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[DOTSPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
svbool_t test_svdup_n_b16(bool op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_b16,)(op);
}

// CHECK-LABEL: @test_svdup_n_b32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i1> [[DOTSPLATINSERT]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b32b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i1> [[DOTSPLATINSERT]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
svbool_t test_svdup_n_b32(bool op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_b32,)(op);
}

// CHECK-LABEL: @test_svdup_n_b64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[DOTSPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b64b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[DOTSPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
svbool_t test_svdup_n_b64(bool op) MODE_ATTR
{
  return SVE_ACLE_FUNC(svdup,_n,_b64,)(op);
}