// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
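
// For illustration (comment added here, not part of the original test): with
// SVE_OVERLOADED_FORMS defined the macro pastes only its first and third
// arguments, so SVE_ACLE_FUNC(svdup,_n,_s8,) expands to the overloaded name
// svdup_s8; without it all four arguments are pasted and the call resolves to
// the explicit name svdup_n_s8.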
// CHECK-LABEL: @test_svdup_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svdup_n_s8a(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdup_n_s8(int8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
}

// CHECK-LABEL: @test_svdup_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s16s(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svdup_n_s16(int16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
}

// CHECK-LABEL: @test_svdup_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s32i(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svdup_n_s32(int32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
}

// CHECK-LABEL: @test_svdup_n_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_s64l(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svdup_n_s64(int64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
}

// CHECK-LABEL: @test_svdup_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svdup_n_u8h(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdup_n_u8(uint8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
}

// CHECK-LABEL: @test_svdup_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u16t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svdup_n_u16(uint16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u16,)(op);
}

// CHECK-LABEL: @test_svdup_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u32j(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svdup_n_u32(uint32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
}

// CHECK-LABEL: @test_svdup_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_u64m(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svdup_n_u64(uint64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
}

// CHECK-LABEL: @test_svdup_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x half> [[DOTSPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f16Dh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 8 x half> [[DOTSPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svdup_n_f16(float16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f16,)(op);
}

// CHECK-LABEL: @test_svdup_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f32f(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svdup_n_f32(float32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f32,)(op);
}

// CHECK-LABEL: @test_svdup_n_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[OP:%.*]], i64 0
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x double> [[DOTSPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_f64d(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 2 x double> [[DOTSPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svdup_n_f64(float64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f64,)(op);
}

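// Comment added for readability: the _z (zeroing) forms below leave inactive
// elements zero; as the checks show, they lower to llvm.aarch64.sve.dup with a
// zeroinitializer passthru, narrowing the svbool_t predicate via
// convert.from.svbool for element types wider than one byte.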
// CHECK-LABEL: @test_svdup_n_s8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_zu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_zu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_zu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_zu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_zu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_zu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_zu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_zu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_zu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svdup_n_f16_z(svbool_t pg, float16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_zu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svdup_n_f32_z(svbool_t pg, float32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_z,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_z(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_zu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svdup_n_f64_z(svbool_t pg, float64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_z,)(pg, op);
}

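// Comment added for readability: the _m (merging) forms below keep inactive
// elements from the extra 'inactive' argument; the checks expect it as the
// passthru operand of llvm.aarch64.sve.dup.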
// CHECK-LABEL: @test_svdup_n_s8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_mu10__SVInt8_tu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, int8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_mu11__SVInt16_tu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t pg, int16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_mu11__SVInt32_tu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t pg, int32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_mu11__SVInt64_tu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t pg, int64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_mu11__SVUint8_tu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> [[INACTIVE:%.*]], <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t pg, uint8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_mu12__SVUint16_tu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svdup_n_u16_m(svuint16_t inactive, svbool_t pg, uint16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_mu12__SVUint32_tu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svdup_n_u32_m(svuint32_t inactive, svbool_t pg, uint32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_mu12__SVUint64_tu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svdup_n_u64_m(svuint64_t inactive, svbool_t pg, uint64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_mu13__SVFloat16_tu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> [[INACTIVE:%.*]], <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svdup_n_f16_m(svfloat16_t inactive, svbool_t pg, float16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_mu13__SVFloat32_tu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> [[INACTIVE:%.*]], <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svdup_n_f32_m(svfloat32_t inactive, svbool_t pg, float32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_m,)(inactive, pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_m(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_mu13__SVFloat64_tu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> [[INACTIVE:%.*]], <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svdup_n_f64_m(svfloat64_t inactive, svbool_t pg, float64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_m,)(inactive, pg, op);
}

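// Comment added for readability: the _x (don't-care) forms below leave
// inactive elements unspecified; the checks expect an undef passthru operand
// to llvm.aarch64.sve.dup.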
// CHECK-LABEL: @test_svdup_n_s8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_s8_xu10__SVBool_ta(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdup_n_s8_x(svbool_t pg, int8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s8_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s16_xu10__SVBool_ts(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svdup_n_s16_x(svbool_t pg, int16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s32_xu10__SVBool_ti(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svdup_n_s32_x(svbool_t pg, int32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_s64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_s64_xu10__SVBool_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svdup_n_s64_x(svbool_t pg, int64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_s64_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u8_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z17test_svdup_n_u8_xu10__SVBool_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[PG:%.*]], i8 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdup_n_u8_x(svbool_t pg, uint8_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u8_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u16_xu10__SVBool_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[TMP0]], i16 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svdup_n_u16_x(svbool_t pg, uint16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u32_xu10__SVBool_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[TMP0]], i32 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svdup_n_u32_x(svbool_t pg, uint32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_u64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_u64_xu10__SVBool_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[TMP0]], i64 [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svdup_n_u64_x(svbool_t pg, uint64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_u64_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f16_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f16_xu10__SVBool_tDh(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> [[TMP0]], half [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svdup_n_f16_x(svbool_t pg, float16_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f16_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f32_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f32_xu10__SVBool_tf(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> [[TMP0]], float [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svdup_n_f32_x(svbool_t pg, float32_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f32_x,)(pg, op);
}

// CHECK-LABEL: @test_svdup_n_f64_x(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_n_f64_xu10__SVBool_td(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> [[TMP0]], double [[OP:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svdup_n_f64_x(svbool_t pg, float64_t op)
{
  return SVE_ACLE_FUNC(svdup,_n,_f64_x,)(pg, op);
}

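// Comment added for readability: svdup_lane broadcasts one element of a
// vector; the checks below expect the index to be splatted and used as the
// table operand of llvm.aarch64.sve.tbl.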
// CHECK-LABEL: @test_svdup_lane_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_lane_s8u10__SVInt8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svdup_lane_s8(svint8_t data, uint8_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_s8,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s16u11__SVInt16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svdup_lane_s16(svint16_t data, uint16_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_s16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s32u11__SVInt32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svdup_lane_s32(svint32_t data, uint32_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_s32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_s64u11__SVInt64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svdup_lane_s64(svint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_s64,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svdup_lane_u8u11__SVUint8_th(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svdup_lane_u8(svuint8_t data, uint8_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_u8,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u16u12__SVUint16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svdup_lane_u16(svuint16_t data, uint16_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_u16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u32u12__SVUint32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svdup_lane_u32(svuint32_t data, uint32_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_u32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_u64u12__SVUint64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svdup_lane_u64(svuint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_u64,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f16u13__SVFloat16_tt(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svdup_lane_f16(svfloat16_t data, uint16_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_f16,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f32u13__SVFloat32_tj(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svdup_lane_f32(svfloat32_t data, uint32_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_f32,,)(data, index);
}

// CHECK-LABEL: @test_svdup_lane_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z19test_svdup_lane_f64u13__SVFloat64_tm(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svdup_lane_f64(svfloat64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdup_lane,_f64,,)(data, index);
}

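// Comment added for readability: the predicate (svbool_t) forms below splat an
// i1; for the wider element widths the checks expect the narrower predicate to
// be widened back to <vscale x 16 x i1> via convert.to.svbool.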
// CHECK-LABEL: @test_svdup_n_b8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i1> [[DOTSPLATINSERT]], <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
// CHECK-NEXT: ret <vscale x 16 x i1> [[DOTSPLAT]]
//
// CPP-CHECK-LABEL: @_Z15test_svdup_n_b8b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i1> [[DOTSPLATINSERT]], <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[DOTSPLAT]]
//
svbool_t test_svdup_n_b8(bool op)
{
  return SVE_ACLE_FUNC(svdup,_n,_b8,)(op);
}

// CHECK-LABEL: @test_svdup_n_b16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[DOTSPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b16b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i1> [[DOTSPLATINSERT]], <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
svbool_t test_svdup_n_b16(bool op)
{
  return SVE_ACLE_FUNC(svdup,_n,_b16,)(op);
}

// CHECK-LABEL: @test_svdup_n_b32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i1> [[DOTSPLATINSERT]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b32b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i1> [[DOTSPLATINSERT]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
svbool_t test_svdup_n_b32(bool op)
{
  return SVE_ACLE_FUNC(svdup,_n,_b32,)(op);
}

// CHECK-LABEL: @test_svdup_n_b64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[OP:%.*]], i64 0
// CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[DOTSPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[DOTSPLAT]])
// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svdup_n_b64b(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[OP:%.*]], i64 0
// CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[DOTSPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[DOTSPLAT]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
svbool_t test_svdup_n_b64(bool op)
{
  return SVE_ACLE_FUNC(svdup,_n,_b64,)(op);
}