// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes='mem2reg,sroa,early-cse<>' | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
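
// Each intrinsic is exercised twice: once by its explicitly typed name (e.g.
// vrev16q_s8) and once, when POLYMORPHIC is defined, by its overloaded
// polymorphic name (e.g. vrev16q); both RUN lines check the same IR.
//
// The unpredicated vrev16q/vrev32q/vrev64q intrinsics reverse the order of
// the elements within each 16-, 32- or 64-bit container and are expected to
// lower to plain shufflevector instructions.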
// CHECK-LABEL: @test_vrev16q_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vrev16q_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev16q(a);
#else /* POLYMORPHIC */
    return vrev16q_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev16q_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vrev16q_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev16q(a);
#else /* POLYMORPHIC */
    return vrev16q_u8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vrev32q_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev32q(a);
#else /* POLYMORPHIC */
    return vrev32q_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vrev32q_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev32q(a);
#else /* POLYMORPHIC */
    return vrev32q_u8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vrev32q_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev32q(a);
#else /* POLYMORPHIC */
    return vrev32q_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vrev32q_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev32q(a);
#else /* POLYMORPHIC */
    return vrev32q_u16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vrev32q_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev32q(a);
#else /* POLYMORPHIC */
    return vrev32q_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vrev64q_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <16 x i8> [[A:%.*]], <16 x i8> poison, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vrev64q_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_u8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vrev64q_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x i16> [[A:%.*]], <8 x i16> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vrev64q_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_u16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vrev64q_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float32x4_t test_vrev64q_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[A:%.*]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vrev64q_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_s32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[A:%.*]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vrev64q_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
    return vrev64q(a);
#else /* POLYMORPHIC */
    return vrev64q_u32(a);
#endif /* POLYMORPHIC */
}

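// Merging predicated forms (_m): lanes enabled by the predicate are
// reversed, while disabled lanes are taken from the extra 'inactive'
// operand. These lower to the @llvm.arm.mve.vrev.predicated intrinsic with
// the inactive vector as its final argument.
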
// CHECK-LABEL: @test_vrev16q_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 16, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev16q_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev16q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev16q_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev16q_m_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 16, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev16q_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev16q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev16q_m_u8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrev.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vrev32q_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev32q_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 32, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev32q_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev32q_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrev32q_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev32q_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_m_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 32, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev32q_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev32q_m_u8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_m_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrev32q_m_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev32q_m_u16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrev.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vrev64q_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrev.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vrev64q_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 64, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev64q_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrev64q_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrev.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vrev64q_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 64, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev64q_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_u8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrev64q_m_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_u16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_m_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrev.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vrev64q_m_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrev64q_m_u32(inactive, a, p);
#endif /* POLYMORPHIC */
}

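// Don't-care predicated forms (_x): only a predicate is supplied and the
// disabled lanes of the result are unspecified, so the same predicated
// intrinsic is used with an undef inactive operand.
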
// CHECK-LABEL: @test_vrev16q_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 16, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev16q_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev16q_x(a, p);
#else /* POLYMORPHIC */
    return vrev16q_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev16q_x_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 16, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev16q_x_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev16q_x(a, p);
#else /* POLYMORPHIC */
    return vrev16q_x_u8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_x_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrev.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vrev32q_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_x(a, p);
#else /* POLYMORPHIC */
    return vrev32q_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 32, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev32q_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_x(a, p);
#else /* POLYMORPHIC */
    return vrev32q_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrev32q_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_x(a, p);
#else /* POLYMORPHIC */
    return vrev32q_x_s16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_x_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 32, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev32q_x_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_x(a, p);
#else /* POLYMORPHIC */
    return vrev32q_x_u8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev32q_x_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 32, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrev32q_x_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev32q_x(a, p);
#else /* POLYMORPHIC */
    return vrev32q_x_u16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrev.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vrev64q_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrev.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vrev64q_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 64, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vrev64q_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vrev64q_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_s16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrev.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vrev64q_x_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_s32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vrev.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 64, <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vrev64q_x_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_u8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vrev.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 64, <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vrev64q_x_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_u16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrev64q_x_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vrev.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 64, <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vrev64q_x_u32(uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrev64q_x(a, p);
#else /* POLYMORPHIC */
    return vrev64q_x_u32(a, p);
#endif /* POLYMORPHIC */
}