// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vmovlbq_s8(
11 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
12 // CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
13 // CHECK-NEXT: ret <8 x i16> [[TMP1]]
15 int16x8_t
test_vmovlbq_s8(int8x16_t a
)
19 #else /* POLYMORPHIC */
21 #endif /* POLYMORPHIC */
24 // CHECK-LABEL: @test_vmovlbq_s16(
26 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
27 // CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
28 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
30 int32x4_t
test_vmovlbq_s16(int16x8_t a
)
34 #else /* POLYMORPHIC */
35 return vmovlbq_s16(a
);
36 #endif /* POLYMORPHIC */
39 // CHECK-LABEL: @test_vmovlbq_u8(
41 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
42 // CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
43 // CHECK-NEXT: ret <8 x i16> [[TMP1]]
45 uint16x8_t
test_vmovlbq_u8(uint8x16_t a
)
49 #else /* POLYMORPHIC */
51 #endif /* POLYMORPHIC */
54 // CHECK-LABEL: @test_vmovlbq_u16(
56 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
57 // CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
58 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
60 uint32x4_t
test_vmovlbq_u16(uint16x8_t a
)
64 #else /* POLYMORPHIC */
65 return vmovlbq_u16(a
);
66 #endif /* POLYMORPHIC */
69 // CHECK-LABEL: @test_vmovltq_s8(
71 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
72 // CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i8> [[TMP0]] to <8 x i16>
73 // CHECK-NEXT: ret <8 x i16> [[TMP1]]
75 int16x8_t
test_vmovltq_s8(int8x16_t a
)
79 #else /* POLYMORPHIC */
81 #endif /* POLYMORPHIC */
84 // CHECK-LABEL: @test_vmovltq_s16(
86 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
87 // CHECK-NEXT: [[TMP1:%.*]] = sext <4 x i16> [[TMP0]] to <4 x i32>
88 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
90 int32x4_t
test_vmovltq_s16(int16x8_t a
)
94 #else /* POLYMORPHIC */
95 return vmovltq_s16(a
);
96 #endif /* POLYMORPHIC */
99 // CHECK-LABEL: @test_vmovltq_u8(
100 // CHECK-NEXT: entry:
101 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
102 // CHECK-NEXT: [[TMP1:%.*]] = zext <8 x i8> [[TMP0]] to <8 x i16>
103 // CHECK-NEXT: ret <8 x i16> [[TMP1]]
105 uint16x8_t
test_vmovltq_u8(uint8x16_t a
)
109 #else /* POLYMORPHIC */
110 return vmovltq_u8(a
);
111 #endif /* POLYMORPHIC */
114 // CHECK-LABEL: @test_vmovltq_u16(
115 // CHECK-NEXT: entry:
116 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
117 // CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i16> [[TMP0]] to <4 x i32>
118 // CHECK-NEXT: ret <4 x i32> [[TMP1]]
120 uint32x4_t
test_vmovltq_u16(uint16x8_t a
)
124 #else /* POLYMORPHIC */
125 return vmovltq_u16(a
);
126 #endif /* POLYMORPHIC */
129 // CHECK-LABEL: @test_vmovlbq_m_s8(
130 // CHECK-NEXT: entry:
131 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
132 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
133 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
134 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
136 int16x8_t
test_vmovlbq_m_s8(int16x8_t inactive
, int8x16_t a
, mve_pred16_t p
)
139 return vmovlbq_m(inactive
, a
, p
);
140 #else /* POLYMORPHIC */
141 return vmovlbq_m_s8(inactive
, a
, p
);
142 #endif /* POLYMORPHIC */
145 // CHECK-LABEL: @test_vmovlbq_m_s16(
146 // CHECK-NEXT: entry:
147 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
148 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
149 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
150 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
152 int32x4_t
test_vmovlbq_m_s16(int32x4_t inactive
, int16x8_t a
, mve_pred16_t p
)
155 return vmovlbq_m(inactive
, a
, p
);
156 #else /* POLYMORPHIC */
157 return vmovlbq_m_s16(inactive
, a
, p
);
158 #endif /* POLYMORPHIC */
161 // CHECK-LABEL: @test_vmovlbq_m_u8(
162 // CHECK-NEXT: entry:
163 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
164 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
165 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
166 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
168 uint16x8_t
test_vmovlbq_m_u8(uint16x8_t inactive
, uint8x16_t a
, mve_pred16_t p
)
171 return vmovlbq_m(inactive
, a
, p
);
172 #else /* POLYMORPHIC */
173 return vmovlbq_m_u8(inactive
, a
, p
);
174 #endif /* POLYMORPHIC */
177 // CHECK-LABEL: @test_vmovlbq_m_u16(
178 // CHECK-NEXT: entry:
179 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
180 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
181 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
182 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
184 uint32x4_t
test_vmovlbq_m_u16(uint32x4_t inactive
, uint16x8_t a
, mve_pred16_t p
)
187 return vmovlbq_m(inactive
, a
, p
);
188 #else /* POLYMORPHIC */
189 return vmovlbq_m_u16(inactive
, a
, p
);
190 #endif /* POLYMORPHIC */
193 // CHECK-LABEL: @test_vmovltq_m_s8(
194 // CHECK-NEXT: entry:
195 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
196 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
197 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
198 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
200 int16x8_t
test_vmovltq_m_s8(int16x8_t inactive
, int8x16_t a
, mve_pred16_t p
)
203 return vmovltq_m(inactive
, a
, p
);
204 #else /* POLYMORPHIC */
205 return vmovltq_m_s8(inactive
, a
, p
);
206 #endif /* POLYMORPHIC */
209 // CHECK-LABEL: @test_vmovltq_m_s16(
210 // CHECK-NEXT: entry:
211 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
212 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
213 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
214 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
216 int32x4_t
test_vmovltq_m_s16(int32x4_t inactive
, int16x8_t a
, mve_pred16_t p
)
219 return vmovltq_m(inactive
, a
, p
);
220 #else /* POLYMORPHIC */
221 return vmovltq_m_s16(inactive
, a
, p
);
222 #endif /* POLYMORPHIC */
225 // CHECK-LABEL: @test_vmovltq_m_u8(
226 // CHECK-NEXT: entry:
227 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
228 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
229 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
230 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
232 uint16x8_t
test_vmovltq_m_u8(uint16x8_t inactive
, uint8x16_t a
, mve_pred16_t p
)
235 return vmovltq_m(inactive
, a
, p
);
236 #else /* POLYMORPHIC */
237 return vmovltq_m_u8(inactive
, a
, p
);
238 #endif /* POLYMORPHIC */
241 // CHECK-LABEL: @test_vmovltq_m_u16(
242 // CHECK-NEXT: entry:
243 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
244 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
245 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
246 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
248 uint32x4_t
test_vmovltq_m_u16(uint32x4_t inactive
, uint16x8_t a
, mve_pred16_t p
)
251 return vmovltq_m(inactive
, a
, p
);
252 #else /* POLYMORPHIC */
253 return vmovltq_m_u16(inactive
, a
, p
);
254 #endif /* POLYMORPHIC */
257 // CHECK-LABEL: @test_vmovlbq_x_s8(
258 // CHECK-NEXT: entry:
259 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
260 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
261 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
262 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
264 int16x8_t
test_vmovlbq_x_s8(int8x16_t a
, mve_pred16_t p
)
267 return vmovlbq_x(a
, p
);
268 #else /* POLYMORPHIC */
269 return vmovlbq_x_s8(a
, p
);
270 #endif /* POLYMORPHIC */
273 // CHECK-LABEL: @test_vmovlbq_x_s16(
274 // CHECK-NEXT: entry:
275 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
276 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
277 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
278 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
280 int32x4_t
test_vmovlbq_x_s16(int16x8_t a
, mve_pred16_t p
)
283 return vmovlbq_x(a
, p
);
284 #else /* POLYMORPHIC */
285 return vmovlbq_x_s16(a
, p
);
286 #endif /* POLYMORPHIC */
289 // CHECK-LABEL: @test_vmovlbq_x_u8(
290 // CHECK-NEXT: entry:
291 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
292 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
293 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
294 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
296 uint16x8_t
test_vmovlbq_x_u8(uint8x16_t a
, mve_pred16_t p
)
299 return vmovlbq_x(a
, p
);
300 #else /* POLYMORPHIC */
301 return vmovlbq_x_u8(a
, p
);
302 #endif /* POLYMORPHIC */
305 // CHECK-LABEL: @test_vmovlbq_x_u16(
306 // CHECK-NEXT: entry:
307 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
308 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
309 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
310 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
312 uint32x4_t
test_vmovlbq_x_u16(uint16x8_t a
, mve_pred16_t p
)
315 return vmovlbq_x(a
, p
);
316 #else /* POLYMORPHIC */
317 return vmovlbq_x_u16(a
, p
);
318 #endif /* POLYMORPHIC */
321 // CHECK-LABEL: @test_vmovltq_x_s8(
322 // CHECK-NEXT: entry:
323 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
324 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
325 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
326 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
328 int16x8_t
test_vmovltq_x_s8(int8x16_t a
, mve_pred16_t p
)
331 return vmovltq_x(a
, p
);
332 #else /* POLYMORPHIC */
333 return vmovltq_x_s8(a
, p
);
334 #endif /* POLYMORPHIC */
337 // CHECK-LABEL: @test_vmovltq_x_s16(
338 // CHECK-NEXT: entry:
339 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
340 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
341 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
342 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
344 int32x4_t
test_vmovltq_x_s16(int16x8_t a
, mve_pred16_t p
)
347 return vmovltq_x(a
, p
);
348 #else /* POLYMORPHIC */
349 return vmovltq_x_s16(a
, p
);
350 #endif /* POLYMORPHIC */
353 // CHECK-LABEL: @test_vmovltq_x_u8(
354 // CHECK-NEXT: entry:
355 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
356 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
357 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vmovl.predicated.v8i16.v16i8.v8i1(<16 x i8> [[A:%.*]], i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
358 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
360 uint16x8_t
test_vmovltq_x_u8(uint8x16_t a
, mve_pred16_t p
)
363 return vmovltq_x(a
, p
);
364 #else /* POLYMORPHIC */
365 return vmovltq_x_u8(a
, p
);
366 #endif /* POLYMORPHIC */
369 // CHECK-LABEL: @test_vmovltq_x_u16(
370 // CHECK-NEXT: entry:
371 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
372 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
373 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vmovl.predicated.v4i32.v8i16.v4i1(<8 x i16> [[A:%.*]], i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
374 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
376 uint32x4_t
test_vmovltq_x_u16(uint16x8_t a
, mve_pred16_t p
)
379 return vmovltq_x(a
, p
);
380 #else /* POLYMORPHIC */
381 return vmovltq_x_u16(a
, p
);
382 #endif /* POLYMORPHIC */