// RUN: %clang_cc1 -triple arm64-apple-darwin -target-feature +neon \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>
9 // CHECK-LABEL: define{{.*}} i8 @test_vget_lane_u8(<8 x i8> noundef %a) #0 {
10 // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
11 // CHECK: ret i8 [[VGET_LANE]]
12 uint8_t test_vget_lane_u8(uint8x8_t a
) {
13 return vget_lane_u8(a
, 7);
16 // CHECK-LABEL: define{{.*}} i16 @test_vget_lane_u16(<4 x i16> noundef %a) #0 {
17 // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3
18 // CHECK: ret i16 [[VGET_LANE]]
19 uint16_t test_vget_lane_u16(uint16x4_t a
) {
20 return vget_lane_u16(a
, 3);
23 // CHECK-LABEL: define{{.*}} i32 @test_vget_lane_u32(<2 x i32> noundef %a) #0 {
24 // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1
25 // CHECK: ret i32 [[VGET_LANE]]
26 uint32_t test_vget_lane_u32(uint32x2_t a
) {
27 return vget_lane_u32(a
, 1);
30 // CHECK-LABEL: define{{.*}} i8 @test_vget_lane_s8(<8 x i8> noundef %a) #0 {
31 // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
32 // CHECK: ret i8 [[VGET_LANE]]
33 int8_t test_vget_lane_s8(int8x8_t a
) {
34 return vget_lane_s8(a
, 7);
37 // CHECK-LABEL: define{{.*}} i16 @test_vget_lane_s16(<4 x i16> noundef %a) #0 {
38 // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3
39 // CHECK: ret i16 [[VGET_LANE]]
40 int16_t test_vget_lane_s16(int16x4_t a
) {
41 return vget_lane_s16(a
, 3);
44 // CHECK-LABEL: define{{.*}} i32 @test_vget_lane_s32(<2 x i32> noundef %a) #0 {
45 // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x i32> %a, i32 1
46 // CHECK: ret i32 [[VGET_LANE]]
47 int32_t test_vget_lane_s32(int32x2_t a
) {
48 return vget_lane_s32(a
, 1);
51 // CHECK-LABEL: define{{.*}} i8 @test_vget_lane_p8(<8 x i8> noundef %a) #0 {
52 // CHECK: [[VGET_LANE:%.*]] = extractelement <8 x i8> %a, i32 7
53 // CHECK: ret i8 [[VGET_LANE]]
54 poly8_t
test_vget_lane_p8(poly8x8_t a
) {
55 return vget_lane_p8(a
, 7);
58 // CHECK-LABEL: define{{.*}} i16 @test_vget_lane_p16(<4 x i16> noundef %a) #0 {
59 // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> %a, i32 3
60 // CHECK: ret i16 [[VGET_LANE]]
61 poly16_t
test_vget_lane_p16(poly16x4_t a
) {
62 return vget_lane_p16(a
, 3);
65 // CHECK-LABEL: define{{.*}} float @test_vget_lane_f32(<2 x float> noundef %a) #0 {
66 // CHECK: [[VGET_LANE:%.*]] = extractelement <2 x float> %a, i32 1
67 // CHECK: ret float [[VGET_LANE]]
68 float32_t
test_vget_lane_f32(float32x2_t a
) {
69 return vget_lane_f32(a
, 1);
72 // CHECK-LABEL: define{{.*}} float @test_vget_lane_f16(<4 x half> noundef %a) #0 {
73 // CHECK: [[__REINT_242:%.*]] = alloca <4 x half>, align 8
74 // CHECK: [[__REINT1_242:%.*]] = alloca i16, align 2
75 // CHECK: store <4 x half> %a, ptr [[__REINT_242]], align 8
76 // CHECK: [[TMP1:%.*]] = load <4 x i16>, ptr [[__REINT_242]], align 8
77 // CHECK: [[VGET_LANE:%.*]] = extractelement <4 x i16> [[TMP1]], i32 1
78 // CHECK: store i16 [[VGET_LANE]], ptr [[__REINT1_242]], align 2
79 // CHECK: [[TMP5:%.*]] = load half, ptr [[__REINT1_242]], align 2
80 // CHECK: [[CONV:%.*]] = fpext half [[TMP5]] to float
81 // CHECK: ret float [[CONV]]
82 float32_t
test_vget_lane_f16(float16x4_t a
) {
83 return vget_lane_f16(a
, 1);
86 // CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_u8(<16 x i8> noundef %a) #0 {
87 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
88 // CHECK: ret i8 [[VGETQ_LANE]]
89 uint8_t test_vgetq_lane_u8(uint8x16_t a
) {
90 return vgetq_lane_u8(a
, 15);
93 // CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_u16(<8 x i16> noundef %a) #0 {
94 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7
95 // CHECK: ret i16 [[VGETQ_LANE]]
96 uint16_t test_vgetq_lane_u16(uint16x8_t a
) {
97 return vgetq_lane_u16(a
, 7);
100 // CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_u32(<4 x i32> noundef %a) #0 {
101 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3
102 // CHECK: ret i32 [[VGETQ_LANE]]
103 uint32_t test_vgetq_lane_u32(uint32x4_t a
) {
104 return vgetq_lane_u32(a
, 3);
107 // CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_s8(<16 x i8> noundef %a) #0 {
108 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
109 // CHECK: ret i8 [[VGETQ_LANE]]
110 int8_t test_vgetq_lane_s8(int8x16_t a
) {
111 return vgetq_lane_s8(a
, 15);
114 // CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_s16(<8 x i16> noundef %a) #0 {
115 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7
116 // CHECK: ret i16 [[VGETQ_LANE]]
117 int16_t test_vgetq_lane_s16(int16x8_t a
) {
118 return vgetq_lane_s16(a
, 7);
121 // CHECK-LABEL: define{{.*}} i32 @test_vgetq_lane_s32(<4 x i32> noundef %a) #0 {
122 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x i32> %a, i32 3
123 // CHECK: ret i32 [[VGETQ_LANE]]
124 int32_t test_vgetq_lane_s32(int32x4_t a
) {
125 return vgetq_lane_s32(a
, 3);
128 // CHECK-LABEL: define{{.*}} i8 @test_vgetq_lane_p8(<16 x i8> noundef %a) #0 {
129 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <16 x i8> %a, i32 15
130 // CHECK: ret i8 [[VGETQ_LANE]]
131 poly8_t
test_vgetq_lane_p8(poly8x16_t a
) {
132 return vgetq_lane_p8(a
, 15);
135 // CHECK-LABEL: define{{.*}} i16 @test_vgetq_lane_p16(<8 x i16> noundef %a) #0 {
136 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> %a, i32 7
137 // CHECK: ret i16 [[VGETQ_LANE]]
138 poly16_t
test_vgetq_lane_p16(poly16x8_t a
) {
139 return vgetq_lane_p16(a
, 7);
142 // CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f32(<4 x float> noundef %a) #0 {
143 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <4 x float> %a, i32 3
144 // CHECK: ret float [[VGETQ_LANE]]
145 float32_t
test_vgetq_lane_f32(float32x4_t a
) {
146 return vgetq_lane_f32(a
, 3);
149 // CHECK-LABEL: define{{.*}} float @test_vgetq_lane_f16(<8 x half> noundef %a) #0 {
150 // CHECK: [[__REINT_244:%.*]] = alloca <8 x half>, align 16
151 // CHECK: [[__REINT1_244:%.*]] = alloca i16, align 2
152 // CHECK: store <8 x half> %a, ptr [[__REINT_244]], align 16
153 // CHECK: [[TMP1:%.*]] = load <8 x i16>, ptr [[__REINT_244]], align 16
154 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <8 x i16> [[TMP1]], i32 3
155 // CHECK: store i16 [[VGETQ_LANE]], ptr [[__REINT1_244]], align 2
156 // CHECK: [[TMP5:%.*]] = load half, ptr [[__REINT1_244]], align 2
157 // CHECK: [[CONV:%.*]] = fpext half [[TMP5]] to float
158 // CHECK: ret float [[CONV]]
159 float32_t
test_vgetq_lane_f16(float16x8_t a
) {
160 return vgetq_lane_f16(a
, 3);
163 // CHECK-LABEL: define{{.*}} i64 @test_vget_lane_s64(<1 x i64> noundef %a) #0 {
164 // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0
165 // CHECK: ret i64 [[VGET_LANE]]
166 int64_t test_vget_lane_s64(int64x1_t a
) {
167 return vget_lane_s64(a
, 0);
170 // CHECK-LABEL: define{{.*}} i64 @test_vget_lane_u64(<1 x i64> noundef %a) #0 {
171 // CHECK: [[VGET_LANE:%.*]] = extractelement <1 x i64> %a, i32 0
172 // CHECK: ret i64 [[VGET_LANE]]
173 uint64_t test_vget_lane_u64(uint64x1_t a
) {
174 return vget_lane_u64(a
, 0);
177 // CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_s64(<2 x i64> noundef %a) #0 {
178 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1
179 // CHECK: ret i64 [[VGETQ_LANE]]
180 int64_t test_vgetq_lane_s64(int64x2_t a
) {
181 return vgetq_lane_s64(a
, 1);
184 // CHECK-LABEL: define{{.*}} i64 @test_vgetq_lane_u64(<2 x i64> noundef %a) #0 {
185 // CHECK: [[VGETQ_LANE:%.*]] = extractelement <2 x i64> %a, i32 1
186 // CHECK: ret i64 [[VGETQ_LANE]]
187 uint64_t test_vgetq_lane_u64(uint64x2_t a
) {
188 return vgetq_lane_u64(a
, 1);
192 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vset_lane_u8(i8 noundef %a, <8 x i8> noundef %b) #0 {
193 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7
194 // CHECK: ret <8 x i8> [[VSET_LANE]]
195 uint8x8_t
test_vset_lane_u8(uint8_t a
, uint8x8_t b
) {
196 return vset_lane_u8(a
, b
, 7);
199 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vset_lane_u16(i16 noundef %a, <4 x i16> noundef %b) #0 {
200 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3
201 // CHECK: ret <4 x i16> [[VSET_LANE]]
202 uint16x4_t
test_vset_lane_u16(uint16_t a
, uint16x4_t b
) {
203 return vset_lane_u16(a
, b
, 3);
206 // CHECK-LABEL: define{{.*}} <2 x i32> @test_vset_lane_u32(i32 noundef %a, <2 x i32> noundef %b) #0 {
207 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i32> %b, i32 %a, i32 1
208 // CHECK: ret <2 x i32> [[VSET_LANE]]
209 uint32x2_t
test_vset_lane_u32(uint32_t a
, uint32x2_t b
) {
210 return vset_lane_u32(a
, b
, 1);
213 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vset_lane_s8(i8 noundef %a, <8 x i8> noundef %b) #0 {
214 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7
215 // CHECK: ret <8 x i8> [[VSET_LANE]]
216 int8x8_t
test_vset_lane_s8(int8_t a
, int8x8_t b
) {
217 return vset_lane_s8(a
, b
, 7);
220 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vset_lane_s16(i16 noundef %a, <4 x i16> noundef %b) #0 {
221 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3
222 // CHECK: ret <4 x i16> [[VSET_LANE]]
223 int16x4_t
test_vset_lane_s16(int16_t a
, int16x4_t b
) {
224 return vset_lane_s16(a
, b
, 3);
227 // CHECK-LABEL: define{{.*}} <2 x i32> @test_vset_lane_s32(i32 noundef %a, <2 x i32> noundef %b) #0 {
228 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i32> %b, i32 %a, i32 1
229 // CHECK: ret <2 x i32> [[VSET_LANE]]
230 int32x2_t
test_vset_lane_s32(int32_t a
, int32x2_t b
) {
231 return vset_lane_s32(a
, b
, 1);
234 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vset_lane_p8(i8 noundef %a, <8 x i8> noundef %b) #0 {
235 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i8> %b, i8 %a, i32 7
236 // CHECK: ret <8 x i8> [[VSET_LANE]]
237 poly8x8_t
test_vset_lane_p8(poly8_t a
, poly8x8_t b
) {
238 return vset_lane_p8(a
, b
, 7);
241 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vset_lane_p16(i16 noundef %a, <4 x i16> noundef %b) #0 {
242 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> %b, i16 %a, i32 3
243 // CHECK: ret <4 x i16> [[VSET_LANE]]
244 poly16x4_t
test_vset_lane_p16(poly16_t a
, poly16x4_t b
) {
245 return vset_lane_p16(a
, b
, 3);
248 // CHECK-LABEL: define{{.*}} <2 x float> @test_vset_lane_f32(float noundef %a, <2 x float> noundef %b) #0 {
249 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x float> %b, float %a, i32 1
250 // CHECK: ret <2 x float> [[VSET_LANE]]
251 float32x2_t
test_vset_lane_f32(float32_t a
, float32x2_t b
) {
252 return vset_lane_f32(a
, b
, 1);
255 // CHECK-LABEL: define{{.*}} <4 x half> @test_vset_lane_f16(ptr noundef %a, <4 x half> noundef %b) #0 {
256 // CHECK: [[__REINT_246:%.*]] = alloca half, align 2
257 // CHECK: [[__REINT1_246:%.*]] = alloca <4 x half>, align 8
258 // CHECK: [[__REINT2_246:%.*]] = alloca <4 x i16>, align 8
259 // CHECK: [[TMP0:%.*]] = load half, ptr %a, align 2
260 // CHECK: store half [[TMP0]], ptr [[__REINT_246]], align 2
261 // CHECK: store <4 x half> %b, ptr [[__REINT1_246]], align 8
262 // CHECK: [[TMP2:%.*]] = load i16, ptr [[__REINT_246]], align 2
263 // CHECK: [[TMP4:%.*]] = load <4 x i16>, ptr [[__REINT1_246]], align 8
264 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i16> [[TMP4]], i16 [[TMP2]], i32 3
265 // CHECK: store <4 x i16> [[VSET_LANE]], ptr [[__REINT2_246]], align 8
266 // CHECK: [[TMP8:%.*]] = load <4 x half>, ptr [[__REINT2_246]], align 8
267 // CHECK: ret <4 x half> [[TMP8]]
268 float16x4_t
test_vset_lane_f16(float16_t
*a
, float16x4_t b
) {
269 return vset_lane_f16(*a
, b
, 3);
272 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_u8(i8 noundef %a, <16 x i8> noundef %b) #0 {
273 // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15
274 // CHECK: ret <16 x i8> [[VSET_LANE]]
275 uint8x16_t
test_vsetq_lane_u8(uint8_t a
, uint8x16_t b
) {
276 return vsetq_lane_u8(a
, b
, 15);
279 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_u16(i16 noundef %a, <8 x i16> noundef %b) #0 {
280 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7
281 // CHECK: ret <8 x i16> [[VSET_LANE]]
282 uint16x8_t
test_vsetq_lane_u16(uint16_t a
, uint16x8_t b
) {
283 return vsetq_lane_u16(a
, b
, 7);
286 // CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_u32(i32 noundef %a, <4 x i32> noundef %b) #0 {
287 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3
288 // CHECK: ret <4 x i32> [[VSET_LANE]]
289 uint32x4_t
test_vsetq_lane_u32(uint32_t a
, uint32x4_t b
) {
290 return vsetq_lane_u32(a
, b
, 3);
293 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_s8(i8 noundef %a, <16 x i8> noundef %b) #0 {
294 // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15
295 // CHECK: ret <16 x i8> [[VSET_LANE]]
296 int8x16_t
test_vsetq_lane_s8(int8_t a
, int8x16_t b
) {
297 return vsetq_lane_s8(a
, b
, 15);
300 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_s16(i16 noundef %a, <8 x i16> noundef %b) #0 {
301 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7
302 // CHECK: ret <8 x i16> [[VSET_LANE]]
303 int16x8_t
test_vsetq_lane_s16(int16_t a
, int16x8_t b
) {
304 return vsetq_lane_s16(a
, b
, 7);
307 // CHECK-LABEL: define{{.*}} <4 x i32> @test_vsetq_lane_s32(i32 noundef %a, <4 x i32> noundef %b) #0 {
308 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x i32> %b, i32 %a, i32 3
309 // CHECK: ret <4 x i32> [[VSET_LANE]]
310 int32x4_t
test_vsetq_lane_s32(int32_t a
, int32x4_t b
) {
311 return vsetq_lane_s32(a
, b
, 3);
314 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vsetq_lane_p8(i8 noundef %a, <16 x i8> noundef %b) #0 {
315 // CHECK: [[VSET_LANE:%.*]] = insertelement <16 x i8> %b, i8 %a, i32 15
316 // CHECK: ret <16 x i8> [[VSET_LANE]]
317 poly8x16_t
test_vsetq_lane_p8(poly8_t a
, poly8x16_t b
) {
318 return vsetq_lane_p8(a
, b
, 15);
321 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vsetq_lane_p16(i16 noundef %a, <8 x i16> noundef %b) #0 {
322 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> %b, i16 %a, i32 7
323 // CHECK: ret <8 x i16> [[VSET_LANE]]
324 poly16x8_t
test_vsetq_lane_p16(poly16_t a
, poly16x8_t b
) {
325 return vsetq_lane_p16(a
, b
, 7);
328 // CHECK-LABEL: define{{.*}} <4 x float> @test_vsetq_lane_f32(float noundef %a, <4 x float> noundef %b) #0 {
329 // CHECK: [[VSET_LANE:%.*]] = insertelement <4 x float> %b, float %a, i32 3
330 // CHECK: ret <4 x float> [[VSET_LANE]]
331 float32x4_t
test_vsetq_lane_f32(float32_t a
, float32x4_t b
) {
332 return vsetq_lane_f32(a
, b
, 3);
335 // CHECK-LABEL: define{{.*}} <8 x half> @test_vsetq_lane_f16(ptr noundef %a, <8 x half> noundef %b) #0 {
336 // CHECK: [[__REINT_248:%.*]] = alloca half, align 2
337 // CHECK: [[__REINT1_248:%.*]] = alloca <8 x half>, align 16
338 // CHECK: [[__REINT2_248:%.*]] = alloca <8 x i16>, align 16
339 // CHECK: [[TMP0:%.*]] = load half, ptr %a, align 2
340 // CHECK: store half [[TMP0]], ptr [[__REINT_248]], align 2
341 // CHECK: store <8 x half> %b, ptr [[__REINT1_248]], align 16
342 // CHECK: [[TMP2:%.*]] = load i16, ptr [[__REINT_248]], align 2
343 // CHECK: [[TMP4:%.*]] = load <8 x i16>, ptr [[__REINT1_248]], align 16
344 // CHECK: [[VSET_LANE:%.*]] = insertelement <8 x i16> [[TMP4]], i16 [[TMP2]], i32 7
345 // CHECK: store <8 x i16> [[VSET_LANE]], ptr [[__REINT2_248]], align 16
346 // CHECK: [[TMP8:%.*]] = load <8 x half>, ptr [[__REINT2_248]], align 16
347 // CHECK: ret <8 x half> [[TMP8]]
348 float16x8_t
test_vsetq_lane_f16(float16_t
*a
, float16x8_t b
) {
349 return vsetq_lane_f16(*a
, b
, 7);
352 // CHECK-LABEL: define{{.*}} <1 x i64> @test_vset_lane_s64(i64 noundef %a, <1 x i64> noundef %b) #0 {
353 // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %b, i64 %a, i32 0
354 // CHECK: ret <1 x i64> [[VSET_LANE]]
355 int64x1_t
test_vset_lane_s64(int64_t a
, int64x1_t b
) {
356 return vset_lane_s64(a
, b
, 0);
359 // CHECK-LABEL: define{{.*}} <1 x i64> @test_vset_lane_u64(i64 noundef %a, <1 x i64> noundef %b) #0 {
360 // CHECK: [[VSET_LANE:%.*]] = insertelement <1 x i64> %b, i64 %a, i32 0
361 // CHECK: ret <1 x i64> [[VSET_LANE]]
362 uint64x1_t
test_vset_lane_u64(uint64_t a
, uint64x1_t b
) {
363 return vset_lane_u64(a
, b
, 0);
366 // CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_s64(i64 noundef %a, <2 x i64> noundef %b) #0 {
367 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1
368 // CHECK: ret <2 x i64> [[VSET_LANE]]
369 int64x2_t
test_vsetq_lane_s64(int64_t a
, int64x2_t b
) {
370 return vsetq_lane_s64(a
, b
, 1);
373 // CHECK-LABEL: define{{.*}} <2 x i64> @test_vsetq_lane_u64(i64 noundef %a, <2 x i64> noundef %b) #0 {
374 // CHECK: [[VSET_LANE:%.*]] = insertelement <2 x i64> %b, i64 %a, i32 1
375 // CHECK: ret <2 x i64> [[VSET_LANE]]
376 uint64x2_t
test_vsetq_lane_u64(uint64_t a
, uint64x2_t b
) {
377 return vsetq_lane_u64(a
, b
, 1);