// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
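
// For example, SVE_ACLE_FUNC(svdupq_lane,_s8,,) pastes to the overloaded name
// svdupq_lane when SVE_OVERLOADED_FORMS is defined, and to the fully suffixed
// name svdupq_lane_s8 otherwise.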
// CHECK-LABEL: @test_svdupq_lane_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
22 // CPP-CHECK-LABEL: @_Z19test_svdupq_lane_s8u10__SVInt8_tm(
23 // CPP-CHECK-NEXT: entry:
24 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
25 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svint8_t test_svdupq_lane_s8(svint8_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s8,,)(data, index);
}
// CHECK-LABEL: @test_svdupq_lane_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
37 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s16u11__SVInt16_tm(
38 // CPP-CHECK-NEXT: entry:
39 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
40 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svint16_t test_svdupq_lane_s16(svint16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s16,,)(data, index);
}
// CHECK-LABEL: @test_svdupq_lane_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
52 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s32u11__SVInt32_tm(
53 // CPP-CHECK-NEXT: entry:
54 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
55 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svint32_t test_svdupq_lane_s32(svint32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s32,,)(data, index);
}
// CHECK-LABEL: @test_svdupq_lane_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
67 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_s64u11__SVInt64_tm(
68 // CPP-CHECK-NEXT: entry:
69 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
70 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svint64_t test_svdupq_lane_s64(svint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_s64,,)(data, index);
}
// CHECK-LABEL: @test_svdupq_lane_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
82 // CPP-CHECK-LABEL: @_Z19test_svdupq_lane_u8u11__SVUint8_tm(
83 // CPP-CHECK-NEXT: entry:
84 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], i64 [[INDEX:%.*]])
85 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
svuint8_t test_svdupq_lane_u8(svuint8_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u8,,)(data, index);
}
// CHECK-LABEL: @test_svdupq_lane_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
97 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u16u12__SVUint16_tm(
98 // CPP-CHECK-NEXT: entry:
99 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], i64 [[INDEX:%.*]])
100 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
svuint16_t test_svdupq_lane_u16(svuint16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u16,,)(data, index);
}
107 // CHECK-LABEL: @test_svdupq_lane_u32(
108 // CHECK-NEXT: entry:
109 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
110 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
112 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u32u12__SVUint32_tm(
113 // CPP-CHECK-NEXT: entry:
114 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], i64 [[INDEX:%.*]])
115 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
svuint32_t test_svdupq_lane_u32(svuint32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u32,,)(data, index);
}
122 // CHECK-LABEL: @test_svdupq_lane_u64(
123 // CHECK-NEXT: entry:
124 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
125 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
127 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_u64u12__SVUint64_tm(
128 // CPP-CHECK-NEXT: entry:
129 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], i64 [[INDEX:%.*]])
130 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
svuint64_t test_svdupq_lane_u64(svuint64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_u64,,)(data, index);
}
137 // CHECK-LABEL: @test_svdupq_lane_f16(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], i64 [[INDEX:%.*]])
140 // CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
142 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f16u13__SVFloat16_tm(
143 // CPP-CHECK-NEXT: entry:
144 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], i64 [[INDEX:%.*]])
145 // CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
svfloat16_t test_svdupq_lane_f16(svfloat16_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f16,,)(data, index);
}
152 // CHECK-LABEL: @test_svdupq_lane_f32(
153 // CHECK-NEXT: entry:
154 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], i64 [[INDEX:%.*]])
155 // CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
157 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f32u13__SVFloat32_tm(
158 // CPP-CHECK-NEXT: entry:
159 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], i64 [[INDEX:%.*]])
160 // CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
svfloat32_t test_svdupq_lane_f32(svfloat32_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f32,,)(data, index);
}
167 // CHECK-LABEL: @test_svdupq_lane_f64(
168 // CHECK-NEXT: entry:
169 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], i64 [[INDEX:%.*]])
170 // CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
172 // CPP-CHECK-LABEL: @_Z20test_svdupq_lane_f64u13__SVFloat64_tm(
173 // CPP-CHECK-NEXT: entry:
174 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], i64 [[INDEX:%.*]])
175 // CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
svfloat64_t test_svdupq_lane_f64(svfloat64_t data, uint64_t index)
{
  return SVE_ACLE_FUNC(svdupq_lane,_f64,,)(data, index);
}
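
// The svdupq_n tests below expect the scalar arguments to be gathered into a
// 128-bit fixed-length vector with insertelement, inserted into a scalable
// vector at element 0 via llvm.vector.insert, and then replicated across the
// whole register with llvm.aarch64.sve.dupq.lane at index 0.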
182 // CHECK-LABEL: @test_svdupq_n_s8(
183 // CHECK-NEXT: entry:
184 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> poison, i8 [[X0:%.*]], i64 0
185 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
186 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
187 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
188 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
189 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
190 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
191 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
192 // CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
193 // CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
194 // CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
195 // CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
196 // CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
197 // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
198 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
199 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
200 // CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP15]], i64 0)
201 // CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
202 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
204 // CPP-CHECK-LABEL: @_Z16test_svdupq_n_s8aaaaaaaaaaaaaaaa(
205 // CPP-CHECK-NEXT: entry:
206 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> poison, i8 [[X0:%.*]], i64 0
207 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
208 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
209 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
210 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
211 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
212 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
213 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
214 // CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
215 // CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
216 // CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
217 // CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
218 // CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
219 // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
220 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
221 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
222 // CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP15]], i64 0)
223 // CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
224 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
svint8_t test_svdupq_n_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3,
                          int8_t x4, int8_t x5, int8_t x6, int8_t x7,
                          int8_t x8, int8_t x9, int8_t x10, int8_t x11,
                          int8_t x12, int8_t x13, int8_t x14, int8_t x15)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_s8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}
235 // CHECK-LABEL: @test_svdupq_n_s16(
236 // CHECK-NEXT: entry:
237 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 [[X0:%.*]], i64 0
238 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
239 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
240 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
241 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
242 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
243 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
244 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
245 // CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP7]], i64 0)
246 // CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
247 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
249 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_s16ssssssss(
250 // CPP-CHECK-NEXT: entry:
251 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 [[X0:%.*]], i64 0
252 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
253 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
254 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
255 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
256 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
257 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
258 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
259 // CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP7]], i64 0)
260 // CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
261 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
svint16_t test_svdupq_n_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3,
                            int16_t x4, int16_t x5, int16_t x6, int16_t x7)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_s16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
270 // CHECK-LABEL: @test_svdupq_n_s32(
271 // CHECK-NEXT: entry:
272 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[X0:%.*]], i64 0
273 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
274 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
275 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
276 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP3]], i64 0)
277 // CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
278 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
280 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_s32iiii(
281 // CPP-CHECK-NEXT: entry:
282 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[X0:%.*]], i64 0
283 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
284 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
285 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
286 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP3]], i64 0)
287 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
288 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
svint32_t test_svdupq_n_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_s32,)(x0, x1, x2, x3);
}
296 // CHECK-LABEL: @test_svdupq_n_s64(
297 // CHECK-NEXT: entry:
298 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> poison, i64 [[X0:%.*]], i64 0
299 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
300 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP1]], i64 0)
301 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
302 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
304 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_s64ll(
305 // CPP-CHECK-NEXT: entry:
306 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> poison, i64 [[X0:%.*]], i64 0
307 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
308 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP1]], i64 0)
309 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
310 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
svint64_t test_svdupq_n_s64(int64_t x0, int64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_s64,)(x0, x1);
}
317 // CHECK-LABEL: @test_svdupq_n_u8(
318 // CHECK-NEXT: entry:
319 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> poison, i8 [[X0:%.*]], i64 0
320 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
321 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
322 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
323 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
324 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
325 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
326 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
327 // CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
328 // CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
329 // CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
330 // CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
331 // CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
332 // CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
333 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
334 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
335 // CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP15]], i64 0)
336 // CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
337 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
339 // CPP-CHECK-LABEL: @_Z16test_svdupq_n_u8hhhhhhhhhhhhhhhh(
340 // CPP-CHECK-NEXT: entry:
341 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i8> poison, i8 [[X0:%.*]], i64 0
342 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x i8> [[TMP0]], i8 [[X1:%.*]], i64 1
343 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x i8> [[TMP1]], i8 [[X2:%.*]], i64 2
344 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x i8> [[TMP2]], i8 [[X3:%.*]], i64 3
345 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x i8> [[TMP3]], i8 [[X4:%.*]], i64 4
346 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x i8> [[TMP4]], i8 [[X5:%.*]], i64 5
347 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x i8> [[TMP5]], i8 [[X6:%.*]], i64 6
348 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x i8> [[TMP6]], i8 [[X7:%.*]], i64 7
349 // CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x i8> [[TMP7]], i8 [[X8:%.*]], i64 8
350 // CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x i8> [[TMP8]], i8 [[X9:%.*]], i64 9
351 // CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x i8> [[TMP9]], i8 [[X10:%.*]], i64 10
352 // CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x i8> [[TMP10]], i8 [[X11:%.*]], i64 11
353 // CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <16 x i8> [[TMP11]], i8 [[X12:%.*]], i64 12
354 // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <16 x i8> [[TMP12]], i8 [[X13:%.*]], i64 13
355 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <16 x i8> [[TMP13]], i8 [[X14:%.*]], i64 14
356 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <16 x i8> [[TMP14]], i8 [[X15:%.*]], i64 15
357 // CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP15]], i64 0)
358 // CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP16]], i64 0)
359 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP17]]
svuint8_t test_svdupq_n_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3,
                           uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7,
                           uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11,
                           uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_u8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}
370 // CHECK-LABEL: @test_svdupq_n_u16(
371 // CHECK-NEXT: entry:
372 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 [[X0:%.*]], i64 0
373 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
374 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
375 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
376 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
377 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
378 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
379 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
380 // CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP7]], i64 0)
381 // CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
382 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
384 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_u16tttttttt(
385 // CPP-CHECK-NEXT: entry:
386 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x i16> poison, i16 [[X0:%.*]], i64 0
387 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x i16> [[TMP0]], i16 [[X1:%.*]], i64 1
388 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[X2:%.*]], i64 2
389 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x i16> [[TMP2]], i16 [[X3:%.*]], i64 3
390 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x i16> [[TMP3]], i16 [[X4:%.*]], i64 4
391 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[X5:%.*]], i64 5
392 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x i16> [[TMP5]], i16 [[X6:%.*]], i64 6
393 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x i16> [[TMP6]], i16 [[X7:%.*]], i64 7
394 // CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP7]], i64 0)
395 // CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP8]], i64 0)
396 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP9]]
svuint16_t test_svdupq_n_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3,
                             uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_u16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
405 // CHECK-LABEL: @test_svdupq_n_u32(
406 // CHECK-NEXT: entry:
407 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[X0:%.*]], i64 0
408 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
409 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
410 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
411 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP3]], i64 0)
412 // CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
413 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
415 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_u32jjjj(
416 // CPP-CHECK-NEXT: entry:
417 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x i32> poison, i32 [[X0:%.*]], i64 0
418 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i32> [[TMP0]], i32 [[X1:%.*]], i64 1
419 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> [[TMP1]], i32 [[X2:%.*]], i64 2
420 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[X3:%.*]], i64 3
421 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP3]], i64 0)
422 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP4]], i64 0)
423 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
svuint32_t test_svdupq_n_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_u32,)(x0, x1, x2, x3);
}
431 // CHECK-LABEL: @test_svdupq_n_u64(
432 // CHECK-NEXT: entry:
433 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> poison, i64 [[X0:%.*]], i64 0
434 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
435 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP1]], i64 0)
436 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
437 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
439 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_u64mm(
440 // CPP-CHECK-NEXT: entry:
441 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i64> poison, i64 [[X0:%.*]], i64 0
442 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP0]], i64 [[X1:%.*]], i64 1
443 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP1]], i64 0)
444 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP2]], i64 0)
445 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
svuint64_t test_svdupq_n_u64(uint64_t x0, uint64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_u64,)(x0, x1);
}
452 // CHECK-LABEL: @test_svdupq_n_f16(
453 // CHECK-NEXT: entry:
454 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x half> poison, half [[X0:%.*]], i64 0
455 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[X1:%.*]], i64 1
456 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[X2:%.*]], i64 2
457 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[X3:%.*]], i64 3
458 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[X4:%.*]], i64 4
459 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
460 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
461 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
462 // CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP7]], i64 0)
463 // CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
464 // CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
466 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_f16DhDhDhDhDhDhDhDh(
467 // CPP-CHECK-NEXT: entry:
468 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x half> poison, half [[X0:%.*]], i64 0
469 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <8 x half> [[TMP0]], half [[X1:%.*]], i64 1
470 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[X2:%.*]], i64 2
471 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[X3:%.*]], i64 3
472 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[X4:%.*]], i64 4
473 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[X5:%.*]], i64 5
474 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[X6:%.*]], i64 6
475 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[X7:%.*]], i64 7
476 // CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP7]], i64 0)
477 // CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP8]], i64 0)
478 // CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP9]]
svfloat16_t test_svdupq_n_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3,
                              float16_t x4, float16_t x5, float16_t x6, float16_t x7)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_f16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
487 // CHECK-LABEL: @test_svdupq_n_f32(
488 // CHECK-NEXT: entry:
489 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> poison, float [[X0:%.*]], i64 0
490 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
491 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
492 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
493 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> [[TMP3]], i64 0)
494 // CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
495 // CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
497 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_f32ffff(
498 // CPP-CHECK-NEXT: entry:
499 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> poison, float [[X0:%.*]], i64 0
500 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[X1:%.*]], i64 1
501 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[X2:%.*]], i64 2
502 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x float> [[TMP2]], float [[X3:%.*]], i64 3
503 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> [[TMP3]], i64 0)
504 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
505 // CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP5]]
svfloat32_t test_svdupq_n_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_f32,)(x0, x1, x2, x3);
}
513 // CHECK-LABEL: @test_svdupq_n_f64(
514 // CHECK-NEXT: entry:
515 // CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[X0:%.*]], i64 0
516 // CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
517 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> [[TMP1]], i64 0)
518 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
519 // CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
521 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_f64dd(
522 // CPP-CHECK-NEXT: entry:
523 // CPP-CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> poison, double [[X0:%.*]], i64 0
524 // CPP-CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[X1:%.*]], i64 1
525 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> poison, <2 x double> [[TMP1]], i64 0)
526 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.dupq.lane.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
527 // CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
svfloat64_t test_svdupq_n_f64(float64_t x0, float64_t x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_f64,)(x0, x1);
}
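
// The predicate (svbool_t) variants below additionally expect the replicated
// integer vector to be turned into a predicate: an all-true predicate from
// llvm.aarch64.sve.ptrue feeds a compare-not-equal against zero
// (llvm.aarch64.sve.cmpne / cmpne.wide), with a convert.to.svbool cast where
// the element width is larger than a byte.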
534 // CHECK-LABEL: @test_svdupq_n_b8(
535 // CHECK-NEXT: entry:
536 // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
537 // CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
538 // CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
539 // CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
540 // CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
541 // CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
542 // CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
543 // CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
544 // CHECK-NEXT: [[FROMBOOL8:%.*]] = zext i1 [[X8:%.*]] to i8
545 // CHECK-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[X9:%.*]] to i8
546 // CHECK-NEXT: [[FROMBOOL10:%.*]] = zext i1 [[X10:%.*]] to i8
547 // CHECK-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[X11:%.*]] to i8
548 // CHECK-NEXT: [[FROMBOOL12:%.*]] = zext i1 [[X12:%.*]] to i8
549 // CHECK-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[X13:%.*]] to i8
550 // CHECK-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[X14:%.*]] to i8
551 // CHECK-NEXT: [[FROMBOOL15:%.*]] = zext i1 [[X15:%.*]] to i8
552 // CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
553 // CHECK-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[FROMBOOL1]] to i1
554 // CHECK-NEXT: [[TOBOOL17:%.*]] = trunc i8 [[FROMBOOL2]] to i1
555 // CHECK-NEXT: [[TOBOOL18:%.*]] = trunc i8 [[FROMBOOL3]] to i1
556 // CHECK-NEXT: [[TOBOOL19:%.*]] = trunc i8 [[FROMBOOL4]] to i1
557 // CHECK-NEXT: [[TOBOOL20:%.*]] = trunc i8 [[FROMBOOL5]] to i1
558 // CHECK-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[FROMBOOL6]] to i1
559 // CHECK-NEXT: [[TOBOOL22:%.*]] = trunc i8 [[FROMBOOL7]] to i1
560 // CHECK-NEXT: [[TOBOOL23:%.*]] = trunc i8 [[FROMBOOL8]] to i1
561 // CHECK-NEXT: [[TOBOOL24:%.*]] = trunc i8 [[FROMBOOL9]] to i1
562 // CHECK-NEXT: [[TOBOOL25:%.*]] = trunc i8 [[FROMBOOL10]] to i1
563 // CHECK-NEXT: [[TOBOOL26:%.*]] = trunc i8 [[FROMBOOL11]] to i1
564 // CHECK-NEXT: [[TOBOOL27:%.*]] = trunc i8 [[FROMBOOL12]] to i1
565 // CHECK-NEXT: [[TOBOOL28:%.*]] = trunc i8 [[FROMBOOL13]] to i1
566 // CHECK-NEXT: [[TOBOOL29:%.*]] = trunc i8 [[FROMBOOL14]] to i1
567 // CHECK-NEXT: [[TOBOOL30:%.*]] = trunc i8 [[FROMBOOL15]] to i1
568 // CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i8
569 // CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL16]] to i8
570 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL17]] to i8
571 // CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL18]] to i8
572 // CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TOBOOL19]] to i8
573 // CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[TOBOOL20]] to i8
574 // CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TOBOOL21]] to i8
575 // CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TOBOOL22]] to i8
576 // CHECK-NEXT: [[TMP8:%.*]] = zext i1 [[TOBOOL23]] to i8
577 // CHECK-NEXT: [[TMP9:%.*]] = zext i1 [[TOBOOL24]] to i8
578 // CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TOBOOL25]] to i8
579 // CHECK-NEXT: [[TMP11:%.*]] = zext i1 [[TOBOOL26]] to i8
580 // CHECK-NEXT: [[TMP12:%.*]] = zext i1 [[TOBOOL27]] to i8
581 // CHECK-NEXT: [[TMP13:%.*]] = zext i1 [[TOBOOL28]] to i8
582 // CHECK-NEXT: [[TMP14:%.*]] = zext i1 [[TOBOOL29]] to i8
583 // CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[TOBOOL30]] to i8
584 // CHECK-NEXT: [[TMP16:%.*]] = insertelement <16 x i8> poison, i8 [[TMP0]], i64 0
585 // CHECK-NEXT: [[TMP17:%.*]] = insertelement <16 x i8> [[TMP16]], i8 [[TMP1]], i64 1
586 // CHECK-NEXT: [[TMP18:%.*]] = insertelement <16 x i8> [[TMP17]], i8 [[TMP2]], i64 2
587 // CHECK-NEXT: [[TMP19:%.*]] = insertelement <16 x i8> [[TMP18]], i8 [[TMP3]], i64 3
588 // CHECK-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> [[TMP19]], i8 [[TMP4]], i64 4
589 // CHECK-NEXT: [[TMP21:%.*]] = insertelement <16 x i8> [[TMP20]], i8 [[TMP5]], i64 5
590 // CHECK-NEXT: [[TMP22:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP6]], i64 6
591 // CHECK-NEXT: [[TMP23:%.*]] = insertelement <16 x i8> [[TMP22]], i8 [[TMP7]], i64 7
592 // CHECK-NEXT: [[TMP24:%.*]] = insertelement <16 x i8> [[TMP23]], i8 [[TMP8]], i64 8
593 // CHECK-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP24]], i8 [[TMP9]], i64 9
594 // CHECK-NEXT: [[TMP26:%.*]] = insertelement <16 x i8> [[TMP25]], i8 [[TMP10]], i64 10
595 // CHECK-NEXT: [[TMP27:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP11]], i64 11
596 // CHECK-NEXT: [[TMP28:%.*]] = insertelement <16 x i8> [[TMP27]], i8 [[TMP12]], i64 12
597 // CHECK-NEXT: [[TMP29:%.*]] = insertelement <16 x i8> [[TMP28]], i8 [[TMP13]], i64 13
598 // CHECK-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP29]], i8 [[TMP14]], i64 14
599 // CHECK-NEXT: [[TMP31:%.*]] = insertelement <16 x i8> [[TMP30]], i8 [[TMP15]], i64 15
600 // CHECK-NEXT: [[TMP32:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP31]], i64 0)
601 // CHECK-NEXT: [[TMP33:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP32]], i64 0)
602 // CHECK-NEXT: [[TMP34:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
603 // CHECK-NEXT: [[TMP35:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP34]], <vscale x 16 x i8> [[TMP33]], <vscale x 2 x i64> zeroinitializer)
604 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP35]]
606 // CPP-CHECK-LABEL: @_Z16test_svdupq_n_b8bbbbbbbbbbbbbbbb(
607 // CPP-CHECK-NEXT: entry:
608 // CPP-CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
609 // CPP-CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
610 // CPP-CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
611 // CPP-CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
612 // CPP-CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
613 // CPP-CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
614 // CPP-CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
615 // CPP-CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
616 // CPP-CHECK-NEXT: [[FROMBOOL8:%.*]] = zext i1 [[X8:%.*]] to i8
617 // CPP-CHECK-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[X9:%.*]] to i8
618 // CPP-CHECK-NEXT: [[FROMBOOL10:%.*]] = zext i1 [[X10:%.*]] to i8
619 // CPP-CHECK-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[X11:%.*]] to i8
620 // CPP-CHECK-NEXT: [[FROMBOOL12:%.*]] = zext i1 [[X12:%.*]] to i8
621 // CPP-CHECK-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[X13:%.*]] to i8
622 // CPP-CHECK-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[X14:%.*]] to i8
623 // CPP-CHECK-NEXT: [[FROMBOOL15:%.*]] = zext i1 [[X15:%.*]] to i8
624 // CPP-CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
625 // CPP-CHECK-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[FROMBOOL1]] to i1
626 // CPP-CHECK-NEXT: [[TOBOOL17:%.*]] = trunc i8 [[FROMBOOL2]] to i1
627 // CPP-CHECK-NEXT: [[TOBOOL18:%.*]] = trunc i8 [[FROMBOOL3]] to i1
628 // CPP-CHECK-NEXT: [[TOBOOL19:%.*]] = trunc i8 [[FROMBOOL4]] to i1
629 // CPP-CHECK-NEXT: [[TOBOOL20:%.*]] = trunc i8 [[FROMBOOL5]] to i1
630 // CPP-CHECK-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[FROMBOOL6]] to i1
631 // CPP-CHECK-NEXT: [[TOBOOL22:%.*]] = trunc i8 [[FROMBOOL7]] to i1
632 // CPP-CHECK-NEXT: [[TOBOOL23:%.*]] = trunc i8 [[FROMBOOL8]] to i1
633 // CPP-CHECK-NEXT: [[TOBOOL24:%.*]] = trunc i8 [[FROMBOOL9]] to i1
634 // CPP-CHECK-NEXT: [[TOBOOL25:%.*]] = trunc i8 [[FROMBOOL10]] to i1
635 // CPP-CHECK-NEXT: [[TOBOOL26:%.*]] = trunc i8 [[FROMBOOL11]] to i1
636 // CPP-CHECK-NEXT: [[TOBOOL27:%.*]] = trunc i8 [[FROMBOOL12]] to i1
637 // CPP-CHECK-NEXT: [[TOBOOL28:%.*]] = trunc i8 [[FROMBOOL13]] to i1
638 // CPP-CHECK-NEXT: [[TOBOOL29:%.*]] = trunc i8 [[FROMBOOL14]] to i1
639 // CPP-CHECK-NEXT: [[TOBOOL30:%.*]] = trunc i8 [[FROMBOOL15]] to i1
640 // CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i8
641 // CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL16]] to i8
642 // CPP-CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL17]] to i8
643 // CPP-CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL18]] to i8
644 // CPP-CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TOBOOL19]] to i8
645 // CPP-CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[TOBOOL20]] to i8
646 // CPP-CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TOBOOL21]] to i8
647 // CPP-CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TOBOOL22]] to i8
648 // CPP-CHECK-NEXT: [[TMP8:%.*]] = zext i1 [[TOBOOL23]] to i8
649 // CPP-CHECK-NEXT: [[TMP9:%.*]] = zext i1 [[TOBOOL24]] to i8
650 // CPP-CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TOBOOL25]] to i8
651 // CPP-CHECK-NEXT: [[TMP11:%.*]] = zext i1 [[TOBOOL26]] to i8
652 // CPP-CHECK-NEXT: [[TMP12:%.*]] = zext i1 [[TOBOOL27]] to i8
653 // CPP-CHECK-NEXT: [[TMP13:%.*]] = zext i1 [[TOBOOL28]] to i8
654 // CPP-CHECK-NEXT: [[TMP14:%.*]] = zext i1 [[TOBOOL29]] to i8
655 // CPP-CHECK-NEXT: [[TMP15:%.*]] = zext i1 [[TOBOOL30]] to i8
656 // CPP-CHECK-NEXT: [[TMP16:%.*]] = insertelement <16 x i8> poison, i8 [[TMP0]], i64 0
657 // CPP-CHECK-NEXT: [[TMP17:%.*]] = insertelement <16 x i8> [[TMP16]], i8 [[TMP1]], i64 1
658 // CPP-CHECK-NEXT: [[TMP18:%.*]] = insertelement <16 x i8> [[TMP17]], i8 [[TMP2]], i64 2
659 // CPP-CHECK-NEXT: [[TMP19:%.*]] = insertelement <16 x i8> [[TMP18]], i8 [[TMP3]], i64 3
660 // CPP-CHECK-NEXT: [[TMP20:%.*]] = insertelement <16 x i8> [[TMP19]], i8 [[TMP4]], i64 4
661 // CPP-CHECK-NEXT: [[TMP21:%.*]] = insertelement <16 x i8> [[TMP20]], i8 [[TMP5]], i64 5
662 // CPP-CHECK-NEXT: [[TMP22:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP6]], i64 6
663 // CPP-CHECK-NEXT: [[TMP23:%.*]] = insertelement <16 x i8> [[TMP22]], i8 [[TMP7]], i64 7
664 // CPP-CHECK-NEXT: [[TMP24:%.*]] = insertelement <16 x i8> [[TMP23]], i8 [[TMP8]], i64 8
665 // CPP-CHECK-NEXT: [[TMP25:%.*]] = insertelement <16 x i8> [[TMP24]], i8 [[TMP9]], i64 9
666 // CPP-CHECK-NEXT: [[TMP26:%.*]] = insertelement <16 x i8> [[TMP25]], i8 [[TMP10]], i64 10
667 // CPP-CHECK-NEXT: [[TMP27:%.*]] = insertelement <16 x i8> [[TMP26]], i8 [[TMP11]], i64 11
668 // CPP-CHECK-NEXT: [[TMP28:%.*]] = insertelement <16 x i8> [[TMP27]], i8 [[TMP12]], i64 12
669 // CPP-CHECK-NEXT: [[TMP29:%.*]] = insertelement <16 x i8> [[TMP28]], i8 [[TMP13]], i64 13
670 // CPP-CHECK-NEXT: [[TMP30:%.*]] = insertelement <16 x i8> [[TMP29]], i8 [[TMP14]], i64 14
671 // CPP-CHECK-NEXT: [[TMP31:%.*]] = insertelement <16 x i8> [[TMP30]], i8 [[TMP15]], i64 15
672 // CPP-CHECK-NEXT: [[TMP32:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v16i8(<vscale x 16 x i8> poison, <16 x i8> [[TMP31]], i64 0)
673 // CPP-CHECK-NEXT: [[TMP33:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dupq.lane.nxv16i8(<vscale x 16 x i8> [[TMP32]], i64 0)
674 // CPP-CHECK-NEXT: [[TMP34:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
675 // CPP-CHECK-NEXT: [[TMP35:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> [[TMP34]], <vscale x 16 x i8> [[TMP33]], <vscale x 2 x i64> zeroinitializer)
676 // CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP35]]
svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
                          bool x4, bool x5, bool x6, bool x7,
                          bool x8, bool x9, bool x10, bool x11,
                          bool x12, bool x13, bool x14, bool x15)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_b8,)(x0, x1, x2, x3, x4, x5, x6, x7,
                                       x8, x9, x10, x11, x12, x13, x14, x15);
}
687 // CHECK-LABEL: @test_svdupq_n_b16(
688 // CHECK-NEXT: entry:
689 // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
690 // CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
691 // CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
692 // CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
693 // CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
694 // CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
695 // CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
696 // CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
697 // CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
698 // CHECK-NEXT: [[TOBOOL8:%.*]] = trunc i8 [[FROMBOOL1]] to i1
699 // CHECK-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[FROMBOOL2]] to i1
700 // CHECK-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[FROMBOOL3]] to i1
701 // CHECK-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[FROMBOOL4]] to i1
702 // CHECK-NEXT: [[TOBOOL12:%.*]] = trunc i8 [[FROMBOOL5]] to i1
703 // CHECK-NEXT: [[TOBOOL13:%.*]] = trunc i8 [[FROMBOOL6]] to i1
704 // CHECK-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[FROMBOOL7]] to i1
705 // CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i16
706 // CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL8]] to i16
707 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL9]] to i16
708 // CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL10]] to i16
709 // CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TOBOOL11]] to i16
710 // CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[TOBOOL12]] to i16
711 // CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TOBOOL13]] to i16
712 // CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TOBOOL14]] to i16
713 // CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x i16> poison, i16 [[TMP0]], i64 0
714 // CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x i16> [[TMP8]], i16 [[TMP1]], i64 1
715 // CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x i16> [[TMP9]], i16 [[TMP2]], i64 2
716 // CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x i16> [[TMP10]], i16 [[TMP3]], i64 3
717 // CHECK-NEXT: [[TMP12:%.*]] = insertelement <8 x i16> [[TMP11]], i16 [[TMP4]], i64 4
718 // CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP5]], i64 5
719 // CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
720 // CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
721 // CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP15]], i64 0)
722 // CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP16]], i64 0)
723 // CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
724 // CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP18]], <vscale x 8 x i16> [[TMP17]], <vscale x 2 x i64> zeroinitializer)
725 // CHECK-NEXT: [[TMP20:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
726 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP20]]
728 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_b16bbbbbbbb(
729 // CPP-CHECK-NEXT: entry:
730 // CPP-CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
731 // CPP-CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
732 // CPP-CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
733 // CPP-CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
734 // CPP-CHECK-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[X4:%.*]] to i8
735 // CPP-CHECK-NEXT: [[FROMBOOL5:%.*]] = zext i1 [[X5:%.*]] to i8
736 // CPP-CHECK-NEXT: [[FROMBOOL6:%.*]] = zext i1 [[X6:%.*]] to i8
737 // CPP-CHECK-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[X7:%.*]] to i8
738 // CPP-CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
739 // CPP-CHECK-NEXT: [[TOBOOL8:%.*]] = trunc i8 [[FROMBOOL1]] to i1
740 // CPP-CHECK-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[FROMBOOL2]] to i1
741 // CPP-CHECK-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[FROMBOOL3]] to i1
742 // CPP-CHECK-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[FROMBOOL4]] to i1
743 // CPP-CHECK-NEXT: [[TOBOOL12:%.*]] = trunc i8 [[FROMBOOL5]] to i1
744 // CPP-CHECK-NEXT: [[TOBOOL13:%.*]] = trunc i8 [[FROMBOOL6]] to i1
745 // CPP-CHECK-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[FROMBOOL7]] to i1
746 // CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i16
747 // CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL8]] to i16
748 // CPP-CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL9]] to i16
749 // CPP-CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL10]] to i16
750 // CPP-CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TOBOOL11]] to i16
751 // CPP-CHECK-NEXT: [[TMP5:%.*]] = zext i1 [[TOBOOL12]] to i16
752 // CPP-CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TOBOOL13]] to i16
753 // CPP-CHECK-NEXT: [[TMP7:%.*]] = zext i1 [[TOBOOL14]] to i16
754 // CPP-CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x i16> poison, i16 [[TMP0]], i64 0
755 // CPP-CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x i16> [[TMP8]], i16 [[TMP1]], i64 1
756 // CPP-CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x i16> [[TMP9]], i16 [[TMP2]], i64 2
757 // CPP-CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x i16> [[TMP10]], i16 [[TMP3]], i64 3
758 // CPP-CHECK-NEXT: [[TMP12:%.*]] = insertelement <8 x i16> [[TMP11]], i16 [[TMP4]], i64 4
759 // CPP-CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP5]], i64 5
760 // CPP-CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP6]], i64 6
761 // CPP-CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP7]], i64 7
762 // CPP-CHECK-NEXT: [[TMP16:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16> poison, <8 x i16> [[TMP15]], i64 0)
763 // CPP-CHECK-NEXT: [[TMP17:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16> [[TMP16]], i64 0)
764 // CPP-CHECK-NEXT: [[TMP18:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
765 // CPP-CHECK-NEXT: [[TMP19:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> [[TMP18]], <vscale x 8 x i16> [[TMP17]], <vscale x 2 x i64> zeroinitializer)
766 // CPP-CHECK-NEXT: [[TMP20:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP19]])
767 // CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP20]]
svbool_t test_svdupq_n_b16(bool x0, bool x1, bool x2, bool x3,
                           bool x4, bool x5, bool x6, bool x7)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_b16,)(x0, x1, x2, x3, x4, x5, x6, x7);
}
776 // CHECK-LABEL: @test_svdupq_n_b32(
777 // CHECK-NEXT: entry:
778 // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
779 // CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
780 // CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
781 // CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
782 // CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
783 // CHECK-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[FROMBOOL1]] to i1
784 // CHECK-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[FROMBOOL2]] to i1
785 // CHECK-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[FROMBOOL3]] to i1
786 // CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i32
787 // CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL4]] to i32
788 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL5]] to i32
789 // CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL6]] to i32
790 // CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
791 // CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP1]], i64 1
792 // CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
793 // CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
794 // CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP7]], i64 0)
795 // CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP8]], i64 0)
796 // CHECK-NEXT: [[TMP10:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
797 // CHECK-NEXT: [[TMP11:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[TMP9]], <vscale x 2 x i64> zeroinitializer)
798 // CHECK-NEXT: [[TMP12:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
799 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP12]]
801 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_b32bbbb(
802 // CPP-CHECK-NEXT: entry:
803 // CPP-CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
804 // CPP-CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
805 // CPP-CHECK-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[X2:%.*]] to i8
806 // CPP-CHECK-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[X3:%.*]] to i8
807 // CPP-CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
808 // CPP-CHECK-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[FROMBOOL1]] to i1
809 // CPP-CHECK-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[FROMBOOL2]] to i1
810 // CPP-CHECK-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[FROMBOOL3]] to i1
811 // CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i32
812 // CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL4]] to i32
813 // CPP-CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TOBOOL5]] to i32
814 // CPP-CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL6]] to i32
815 // CPP-CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
816 // CPP-CHECK-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP1]], i64 1
817 // CPP-CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP2]], i64 2
818 // CPP-CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP3]], i64 3
819 // CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> [[TMP7]], i64 0)
820 // CPP-CHECK-NEXT: [[TMP9:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32> [[TMP8]], i64 0)
821 // CPP-CHECK-NEXT: [[TMP10:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
822 // CPP-CHECK-NEXT: [[TMP11:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[TMP9]], <vscale x 2 x i64> zeroinitializer)
823 // CPP-CHECK-NEXT: [[TMP12:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP11]])
824 // CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP12]]
svbool_t test_svdupq_n_b32(bool x0, bool x1, bool x2, bool x3)
{
  // <assume other insertelement>
  return SVE_ACLE_FUNC(svdupq,_n,_b32,)(x0, x1, x2, x3);
}
832 // CHECK-LABEL: @test_svdupq_n_b64(
833 // CHECK-NEXT: entry:
834 // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
835 // CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
836 // CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
837 // CHECK-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[FROMBOOL1]] to i1
838 // CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i64
839 // CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL2]] to i64
840 // CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
841 // CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
842 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP3]], i64 0)
843 // CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
844 // CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
845 // CHECK-NEXT: [[TMP7:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> zeroinitializer)
846 // CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
847 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
849 // CPP-CHECK-LABEL: @_Z17test_svdupq_n_b64bb(
850 // CPP-CHECK-NEXT: entry:
851 // CPP-CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[X0:%.*]] to i8
852 // CPP-CHECK-NEXT: [[FROMBOOL1:%.*]] = zext i1 [[X1:%.*]] to i8
853 // CPP-CHECK-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
854 // CPP-CHECK-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[FROMBOOL1]] to i1
855 // CPP-CHECK-NEXT: [[TMP0:%.*]] = zext i1 [[TOBOOL]] to i64
856 // CPP-CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[TOBOOL2]] to i64
857 // CPP-CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
858 // CPP-CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[TMP1]], i64 1
859 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> poison, <2 x i64> [[TMP3]], i64 0)
860 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dupq.lane.nxv2i64(<vscale x 2 x i64> [[TMP4]], i64 0)
861 // CPP-CHECK-NEXT: [[TMP6:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
862 // CPP-CHECK-NEXT: [[TMP7:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP5]], <vscale x 2 x i64> zeroinitializer)
863 // CPP-CHECK-NEXT: [[TMP8:%.*]] = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP7]])
864 // CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
svbool_t test_svdupq_n_b64(bool x0, bool x1)
{
  return SVE_ACLE_FUNC(svdupq,_n,_b64,)(x0, x1);
}