// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vshlq_s8(
11 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 0)
12 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
14 int8x16_t
test_vshlq_s8(int8x16_t a
, int8x16_t b
)
18 #else /* POLYMORPHIC */
19 return vshlq_s8(a
, b
);
20 #endif /* POLYMORPHIC */
23 // CHECK-LABEL: @test_vshlq_s16(
25 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0)
26 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
28 int16x8_t
test_vshlq_s16(int16x8_t a
, int16x8_t b
)
32 #else /* POLYMORPHIC */
33 return vshlq_s16(a
, b
);
34 #endif /* POLYMORPHIC */
37 // CHECK-LABEL: @test_vshlq_s32(
39 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0)
40 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
42 int32x4_t
test_vshlq_s32(int32x4_t a
, int32x4_t b
)
46 #else /* POLYMORPHIC */
47 return vshlq_s32(a
, b
);
48 #endif /* POLYMORPHIC */
51 // CHECK-LABEL: @test_vshlq_u8(
53 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 1)
54 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
56 uint8x16_t
test_vshlq_u8(uint8x16_t a
, int8x16_t b
)
60 #else /* POLYMORPHIC */
61 return vshlq_u8(a
, b
);
62 #endif /* POLYMORPHIC */
65 // CHECK-LABEL: @test_vshlq_u16(
67 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1)
68 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
70 uint16x8_t
test_vshlq_u16(uint16x8_t a
, int16x8_t b
)
74 #else /* POLYMORPHIC */
75 return vshlq_u16(a
, b
);
76 #endif /* POLYMORPHIC */
79 // CHECK-LABEL: @test_vshlq_u32(
81 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1)
82 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
84 uint32x4_t
test_vshlq_u32(uint32x4_t a
, int32x4_t b
)
88 #else /* POLYMORPHIC */
89 return vshlq_u32(a
, b
);
90 #endif /* POLYMORPHIC */
93 // CHECK-LABEL: @test_vshlq_r_s8(
95 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0)
96 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
98 int8x16_t
test_vshlq_r_s8(int8x16_t a
, int32_t b
)
101 return vshlq_r(a
, b
);
102 #else /* POLYMORPHIC */
103 return vshlq_r_s8(a
, b
);
104 #endif /* POLYMORPHIC */
107 // CHECK-LABEL: @test_vshlq_r_s16(
108 // CHECK-NEXT: entry:
109 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0)
110 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
112 int16x8_t
test_vshlq_r_s16(int16x8_t a
, int32_t b
)
115 return vshlq_r(a
, b
);
116 #else /* POLYMORPHIC */
117 return vshlq_r_s16(a
, b
);
118 #endif /* POLYMORPHIC */
121 // CHECK-LABEL: @test_vshlq_r_s32(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0)
124 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
126 int32x4_t
test_vshlq_r_s32(int32x4_t a
, int32_t b
)
129 return vshlq_r(a
, b
);
130 #else /* POLYMORPHIC */
131 return vshlq_r_s32(a
, b
);
132 #endif /* POLYMORPHIC */
135 // CHECK-LABEL: @test_vshlq_r_u8(
136 // CHECK-NEXT: entry:
137 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1)
138 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
140 uint8x16_t
test_vshlq_r_u8(uint8x16_t a
, int32_t b
)
143 return vshlq_r(a
, b
);
144 #else /* POLYMORPHIC */
145 return vshlq_r_u8(a
, b
);
146 #endif /* POLYMORPHIC */
149 // CHECK-LABEL: @test_vshlq_r_u16(
150 // CHECK-NEXT: entry:
151 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1)
152 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
154 uint16x8_t
test_vshlq_r_u16(uint16x8_t a
, int32_t b
)
157 return vshlq_r(a
, b
);
158 #else /* POLYMORPHIC */
159 return vshlq_r_u16(a
, b
);
160 #endif /* POLYMORPHIC */
163 // CHECK-LABEL: @test_vshlq_r_u32(
164 // CHECK-NEXT: entry:
165 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1)
166 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
168 uint32x4_t
test_vshlq_r_u32(uint32x4_t a
, int32_t b
)
171 return vshlq_r(a
, b
);
172 #else /* POLYMORPHIC */
173 return vshlq_r_u32(a
, b
);
174 #endif /* POLYMORPHIC */
177 // CHECK-LABEL: @test_vqshlq_s8(
178 // CHECK-NEXT: entry:
179 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 0)
180 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
182 int8x16_t
test_vqshlq_s8(int8x16_t a
, int8x16_t b
)
186 #else /* POLYMORPHIC */
187 return vqshlq_s8(a
, b
);
188 #endif /* POLYMORPHIC */
191 // CHECK-LABEL: @test_vqshlq_s16(
192 // CHECK-NEXT: entry:
193 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0)
194 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
196 int16x8_t
test_vqshlq_s16(int16x8_t a
, int16x8_t b
)
200 #else /* POLYMORPHIC */
201 return vqshlq_s16(a
, b
);
202 #endif /* POLYMORPHIC */
205 // CHECK-LABEL: @test_vqshlq_s32(
206 // CHECK-NEXT: entry:
207 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0)
208 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
210 int32x4_t
test_vqshlq_s32(int32x4_t a
, int32x4_t b
)
214 #else /* POLYMORPHIC */
215 return vqshlq_s32(a
, b
);
216 #endif /* POLYMORPHIC */
219 // CHECK-LABEL: @test_vqshlq_u8(
220 // CHECK-NEXT: entry:
221 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 1)
222 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
224 uint8x16_t
test_vqshlq_u8(uint8x16_t a
, int8x16_t b
)
228 #else /* POLYMORPHIC */
229 return vqshlq_u8(a
, b
);
230 #endif /* POLYMORPHIC */
233 // CHECK-LABEL: @test_vqshlq_u16(
234 // CHECK-NEXT: entry:
235 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1)
236 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
238 uint16x8_t
test_vqshlq_u16(uint16x8_t a
, int16x8_t b
)
242 #else /* POLYMORPHIC */
243 return vqshlq_u16(a
, b
);
244 #endif /* POLYMORPHIC */
247 // CHECK-LABEL: @test_vqshlq_u32(
248 // CHECK-NEXT: entry:
249 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1)
250 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
252 uint32x4_t
test_vqshlq_u32(uint32x4_t a
, int32x4_t b
)
256 #else /* POLYMORPHIC */
257 return vqshlq_u32(a
, b
);
258 #endif /* POLYMORPHIC */
261 // CHECK-LABEL: @test_vqshlq_r_s8(
262 // CHECK-NEXT: entry:
263 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0)
264 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
266 int8x16_t
test_vqshlq_r_s8(int8x16_t a
, int32_t b
)
269 return vqshlq_r(a
, b
);
270 #else /* POLYMORPHIC */
271 return vqshlq_r_s8(a
, b
);
272 #endif /* POLYMORPHIC */
275 // CHECK-LABEL: @test_vqshlq_r_s16(
276 // CHECK-NEXT: entry:
277 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0)
278 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
280 int16x8_t
test_vqshlq_r_s16(int16x8_t a
, int32_t b
)
283 return vqshlq_r(a
, b
);
284 #else /* POLYMORPHIC */
285 return vqshlq_r_s16(a
, b
);
286 #endif /* POLYMORPHIC */
289 // CHECK-LABEL: @test_vqshlq_r_s32(
290 // CHECK-NEXT: entry:
291 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0)
292 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
294 int32x4_t
test_vqshlq_r_s32(int32x4_t a
, int32_t b
)
297 return vqshlq_r(a
, b
);
298 #else /* POLYMORPHIC */
299 return vqshlq_r_s32(a
, b
);
300 #endif /* POLYMORPHIC */
303 // CHECK-LABEL: @test_vqshlq_r_u8(
304 // CHECK-NEXT: entry:
305 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1)
306 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
308 uint8x16_t
test_vqshlq_r_u8(uint8x16_t a
, int32_t b
)
311 return vqshlq_r(a
, b
);
312 #else /* POLYMORPHIC */
313 return vqshlq_r_u8(a
, b
);
314 #endif /* POLYMORPHIC */
317 // CHECK-LABEL: @test_vqshlq_r_u16(
318 // CHECK-NEXT: entry:
319 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1)
320 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
322 uint16x8_t
test_vqshlq_r_u16(uint16x8_t a
, int32_t b
)
325 return vqshlq_r(a
, b
);
326 #else /* POLYMORPHIC */
327 return vqshlq_r_u16(a
, b
);
328 #endif /* POLYMORPHIC */
331 // CHECK-LABEL: @test_vqshlq_r_u32(
332 // CHECK-NEXT: entry:
333 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1)
334 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
336 uint32x4_t
test_vqshlq_r_u32(uint32x4_t a
, int32_t b
)
339 return vqshlq_r(a
, b
);
340 #else /* POLYMORPHIC */
341 return vqshlq_r_u32(a
, b
);
342 #endif /* POLYMORPHIC */
345 // CHECK-LABEL: @test_vrshlq_s8(
346 // CHECK-NEXT: entry:
347 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 0)
348 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
350 int8x16_t
test_vrshlq_s8(int8x16_t a
, int8x16_t b
)
354 #else /* POLYMORPHIC */
355 return vrshlq_s8(a
, b
);
356 #endif /* POLYMORPHIC */
359 // CHECK-LABEL: @test_vrshlq_s16(
360 // CHECK-NEXT: entry:
361 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 0)
362 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
364 int16x8_t
test_vrshlq_s16(int16x8_t a
, int16x8_t b
)
368 #else /* POLYMORPHIC */
369 return vrshlq_s16(a
, b
);
370 #endif /* POLYMORPHIC */
373 // CHECK-LABEL: @test_vrshlq_s32(
374 // CHECK-NEXT: entry:
375 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 0)
376 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
378 int32x4_t
test_vrshlq_s32(int32x4_t a
, int32x4_t b
)
382 #else /* POLYMORPHIC */
383 return vrshlq_s32(a
, b
);
384 #endif /* POLYMORPHIC */
387 // CHECK-LABEL: @test_vrshlq_u8(
388 // CHECK-NEXT: entry:
389 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 1)
390 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
392 uint8x16_t
test_vrshlq_u8(uint8x16_t a
, int8x16_t b
)
396 #else /* POLYMORPHIC */
397 return vrshlq_u8(a
, b
);
398 #endif /* POLYMORPHIC */
401 // CHECK-LABEL: @test_vrshlq_u16(
402 // CHECK-NEXT: entry:
403 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 1)
404 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
406 uint16x8_t
test_vrshlq_u16(uint16x8_t a
, int16x8_t b
)
410 #else /* POLYMORPHIC */
411 return vrshlq_u16(a
, b
);
412 #endif /* POLYMORPHIC */
415 // CHECK-LABEL: @test_vrshlq_u32(
416 // CHECK-NEXT: entry:
417 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 1)
418 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
420 uint32x4_t
test_vrshlq_u32(uint32x4_t a
, int32x4_t b
)
424 #else /* POLYMORPHIC */
425 return vrshlq_u32(a
, b
);
426 #endif /* POLYMORPHIC */
429 // CHECK-LABEL: @test_vrshlq_n_s8(
430 // CHECK-NEXT: entry:
431 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0)
432 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
434 int8x16_t
test_vrshlq_n_s8(int8x16_t a
, int32_t b
)
438 #else /* POLYMORPHIC */
439 return vrshlq_n_s8(a
, b
);
440 #endif /* POLYMORPHIC */
443 // CHECK-LABEL: @test_vrshlq_n_s16(
444 // CHECK-NEXT: entry:
445 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0)
446 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
448 int16x8_t
test_vrshlq_n_s16(int16x8_t a
, int32_t b
)
452 #else /* POLYMORPHIC */
453 return vrshlq_n_s16(a
, b
);
454 #endif /* POLYMORPHIC */
457 // CHECK-LABEL: @test_vrshlq_n_s32(
458 // CHECK-NEXT: entry:
459 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0)
460 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
462 int32x4_t
test_vrshlq_n_s32(int32x4_t a
, int32_t b
)
466 #else /* POLYMORPHIC */
467 return vrshlq_n_s32(a
, b
);
468 #endif /* POLYMORPHIC */
471 // CHECK-LABEL: @test_vrshlq_n_u8(
472 // CHECK-NEXT: entry:
473 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1)
474 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
476 uint8x16_t
test_vrshlq_n_u8(uint8x16_t a
, int32_t b
)
480 #else /* POLYMORPHIC */
481 return vrshlq_n_u8(a
, b
);
482 #endif /* POLYMORPHIC */
485 // CHECK-LABEL: @test_vrshlq_n_u16(
486 // CHECK-NEXT: entry:
487 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1)
488 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
490 uint16x8_t
test_vrshlq_n_u16(uint16x8_t a
, int32_t b
)
494 #else /* POLYMORPHIC */
495 return vrshlq_n_u16(a
, b
);
496 #endif /* POLYMORPHIC */
499 // CHECK-LABEL: @test_vrshlq_n_u32(
500 // CHECK-NEXT: entry:
501 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1)
502 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
504 uint32x4_t
test_vrshlq_n_u32(uint32x4_t a
, int32_t b
)
508 #else /* POLYMORPHIC */
509 return vrshlq_n_u32(a
, b
);
510 #endif /* POLYMORPHIC */
513 // CHECK-LABEL: @test_vqrshlq_s8(
514 // CHECK-NEXT: entry:
515 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 0)
516 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
518 int8x16_t
test_vqrshlq_s8(int8x16_t a
, int8x16_t b
)
521 return vqrshlq(a
, b
);
522 #else /* POLYMORPHIC */
523 return vqrshlq_s8(a
, b
);
524 #endif /* POLYMORPHIC */
527 // CHECK-LABEL: @test_vqrshlq_s16(
528 // CHECK-NEXT: entry:
529 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0)
530 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
532 int16x8_t
test_vqrshlq_s16(int16x8_t a
, int16x8_t b
)
535 return vqrshlq(a
, b
);
536 #else /* POLYMORPHIC */
537 return vqrshlq_s16(a
, b
);
538 #endif /* POLYMORPHIC */
541 // CHECK-LABEL: @test_vqrshlq_s32(
542 // CHECK-NEXT: entry:
543 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0)
544 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
546 int32x4_t
test_vqrshlq_s32(int32x4_t a
, int32x4_t b
)
549 return vqrshlq(a
, b
);
550 #else /* POLYMORPHIC */
551 return vqrshlq_s32(a
, b
);
552 #endif /* POLYMORPHIC */
555 // CHECK-LABEL: @test_vqrshlq_u8(
556 // CHECK-NEXT: entry:
557 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.v16i8.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 1)
558 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
560 uint8x16_t
test_vqrshlq_u8(uint8x16_t a
, int8x16_t b
)
563 return vqrshlq(a
, b
);
564 #else /* POLYMORPHIC */
565 return vqrshlq_u8(a
, b
);
566 #endif /* POLYMORPHIC */
569 // CHECK-LABEL: @test_vqrshlq_u16(
570 // CHECK-NEXT: entry:
571 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.v8i16.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1)
572 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
574 uint16x8_t
test_vqrshlq_u16(uint16x8_t a
, int16x8_t b
)
577 return vqrshlq(a
, b
);
578 #else /* POLYMORPHIC */
579 return vqrshlq_u16(a
, b
);
580 #endif /* POLYMORPHIC */
583 // CHECK-LABEL: @test_vqrshlq_u32(
584 // CHECK-NEXT: entry:
585 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.v4i32.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1)
586 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
588 uint32x4_t
test_vqrshlq_u32(uint32x4_t a
, int32x4_t b
)
591 return vqrshlq(a
, b
);
592 #else /* POLYMORPHIC */
593 return vqrshlq_u32(a
, b
);
594 #endif /* POLYMORPHIC */
597 // CHECK-LABEL: @test_vqrshlq_n_s8(
598 // CHECK-NEXT: entry:
599 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0)
600 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
602 int8x16_t
test_vqrshlq_n_s8(int8x16_t a
, int32_t b
)
605 return vqrshlq(a
, b
);
606 #else /* POLYMORPHIC */
607 return vqrshlq_n_s8(a
, b
);
608 #endif /* POLYMORPHIC */
611 // CHECK-LABEL: @test_vqrshlq_n_s16(
612 // CHECK-NEXT: entry:
613 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0)
614 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
616 int16x8_t
test_vqrshlq_n_s16(int16x8_t a
, int32_t b
)
619 return vqrshlq(a
, b
);
620 #else /* POLYMORPHIC */
621 return vqrshlq_n_s16(a
, b
);
622 #endif /* POLYMORPHIC */
625 // CHECK-LABEL: @test_vqrshlq_n_s32(
626 // CHECK-NEXT: entry:
627 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0)
628 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
630 int32x4_t
test_vqrshlq_n_s32(int32x4_t a
, int32_t b
)
633 return vqrshlq(a
, b
);
634 #else /* POLYMORPHIC */
635 return vqrshlq_n_s32(a
, b
);
636 #endif /* POLYMORPHIC */
639 // CHECK-LABEL: @test_vqrshlq_n_u8(
640 // CHECK-NEXT: entry:
641 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.v16i8(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1)
642 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
644 uint8x16_t
test_vqrshlq_n_u8(uint8x16_t a
, int32_t b
)
647 return vqrshlq(a
, b
);
648 #else /* POLYMORPHIC */
649 return vqrshlq_n_u8(a
, b
);
650 #endif /* POLYMORPHIC */
653 // CHECK-LABEL: @test_vqrshlq_n_u16(
654 // CHECK-NEXT: entry:
655 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.v8i16(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1)
656 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
658 uint16x8_t
test_vqrshlq_n_u16(uint16x8_t a
, int32_t b
)
661 return vqrshlq(a
, b
);
662 #else /* POLYMORPHIC */
663 return vqrshlq_n_u16(a
, b
);
664 #endif /* POLYMORPHIC */
667 // CHECK-LABEL: @test_vqrshlq_n_u32(
668 // CHECK-NEXT: entry:
669 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.v4i32(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1)
670 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
672 uint32x4_t
test_vqrshlq_n_u32(uint32x4_t a
, int32_t b
)
675 return vqrshlq(a
, b
);
676 #else /* POLYMORPHIC */
677 return vqrshlq_n_u32(a
, b
);
678 #endif /* POLYMORPHIC */
681 // CHECK-LABEL: @test_vshlq_m_s8(
682 // CHECK-NEXT: entry:
683 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
684 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
685 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
686 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
688 int8x16_t
test_vshlq_m_s8(int8x16_t inactive
, int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
691 return vshlq_m(inactive
, a
, b
, p
);
692 #else /* POLYMORPHIC */
693 return vshlq_m_s8(inactive
, a
, b
, p
);
694 #endif /* POLYMORPHIC */
697 // CHECK-LABEL: @test_vshlq_m_s16(
698 // CHECK-NEXT: entry:
699 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
700 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
701 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
702 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
704 int16x8_t
test_vshlq_m_s16(int16x8_t inactive
, int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
707 return vshlq_m(inactive
, a
, b
, p
);
708 #else /* POLYMORPHIC */
709 return vshlq_m_s16(inactive
, a
, b
, p
);
710 #endif /* POLYMORPHIC */
713 // CHECK-LABEL: @test_vshlq_m_s32(
714 // CHECK-NEXT: entry:
715 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
716 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
717 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
718 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
720 int32x4_t
test_vshlq_m_s32(int32x4_t inactive
, int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
723 return vshlq_m(inactive
, a
, b
, p
);
724 #else /* POLYMORPHIC */
725 return vshlq_m_s32(inactive
, a
, b
, p
);
726 #endif /* POLYMORPHIC */
729 // CHECK-LABEL: @test_vshlq_m_u8(
730 // CHECK-NEXT: entry:
731 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
732 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
733 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
734 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
736 uint8x16_t
test_vshlq_m_u8(uint8x16_t inactive
, uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
739 return vshlq_m(inactive
, a
, b
, p
);
740 #else /* POLYMORPHIC */
741 return vshlq_m_u8(inactive
, a
, b
, p
);
742 #endif /* POLYMORPHIC */
745 // CHECK-LABEL: @test_vshlq_m_u16(
746 // CHECK-NEXT: entry:
747 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
748 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
749 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
750 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
752 uint16x8_t
test_vshlq_m_u16(uint16x8_t inactive
, uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
755 return vshlq_m(inactive
, a
, b
, p
);
756 #else /* POLYMORPHIC */
757 return vshlq_m_u16(inactive
, a
, b
, p
);
758 #endif /* POLYMORPHIC */
761 // CHECK-LABEL: @test_vshlq_m_u32(
762 // CHECK-NEXT: entry:
763 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
764 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
765 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
766 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
768 uint32x4_t
test_vshlq_m_u32(uint32x4_t inactive
, uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
771 return vshlq_m(inactive
, a
, b
, p
);
772 #else /* POLYMORPHIC */
773 return vshlq_m_u32(inactive
, a
, b
, p
);
774 #endif /* POLYMORPHIC */
777 // CHECK-LABEL: @test_vshlq_x_s8(
778 // CHECK-NEXT: entry:
779 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
780 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
781 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 0, <16 x i1> [[TMP1]], <16 x i8> undef)
782 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
784 int8x16_t
test_vshlq_x_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
787 return vshlq_x(a
, b
, p
);
788 #else /* POLYMORPHIC */
789 return vshlq_x_s8(a
, b
, p
);
790 #endif /* POLYMORPHIC */
793 // CHECK-LABEL: @test_vshlq_x_s16(
794 // CHECK-NEXT: entry:
795 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
796 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
797 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
798 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
800 int16x8_t
test_vshlq_x_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
803 return vshlq_x(a
, b
, p
);
804 #else /* POLYMORPHIC */
805 return vshlq_x_s16(a
, b
, p
);
806 #endif /* POLYMORPHIC */
809 // CHECK-LABEL: @test_vshlq_x_s32(
810 // CHECK-NEXT: entry:
811 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
812 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
813 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
814 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
816 int32x4_t
test_vshlq_x_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
819 return vshlq_x(a
, b
, p
);
820 #else /* POLYMORPHIC */
821 return vshlq_x_s32(a
, b
, p
);
822 #endif /* POLYMORPHIC */
825 // CHECK-LABEL: @test_vshlq_x_u8(
826 // CHECK-NEXT: entry:
827 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
828 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
829 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 0, i32 1, <16 x i1> [[TMP1]], <16 x i8> undef)
830 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
832 uint8x16_t
test_vshlq_x_u8(uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
835 return vshlq_x(a
, b
, p
);
836 #else /* POLYMORPHIC */
837 return vshlq_x_u8(a
, b
, p
);
838 #endif /* POLYMORPHIC */
841 // CHECK-LABEL: @test_vshlq_x_u16(
842 // CHECK-NEXT: entry:
843 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
844 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
845 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
846 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
848 uint16x8_t
test_vshlq_x_u16(uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
851 return vshlq_x(a
, b
, p
);
852 #else /* POLYMORPHIC */
853 return vshlq_x_u16(a
, b
, p
);
854 #endif /* POLYMORPHIC */
857 // CHECK-LABEL: @test_vshlq_x_u32(
858 // CHECK-NEXT: entry:
859 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
860 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
861 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
862 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
864 uint32x4_t
test_vshlq_x_u32(uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
867 return vshlq_x(a
, b
, p
);
868 #else /* POLYMORPHIC */
869 return vshlq_x_u32(a
, b
, p
);
870 #endif /* POLYMORPHIC */
873 // CHECK-LABEL: @test_vshlq_m_r_s8(
874 // CHECK-NEXT: entry:
875 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
876 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
877 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0, <16 x i1> [[TMP1]])
878 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
880 int8x16_t
test_vshlq_m_r_s8(int8x16_t a
, int32_t b
, mve_pred16_t p
)
883 return vshlq_m_r(a
, b
, p
);
884 #else /* POLYMORPHIC */
885 return vshlq_m_r_s8(a
, b
, p
);
886 #endif /* POLYMORPHIC */
889 // CHECK-LABEL: @test_vshlq_m_r_s16(
890 // CHECK-NEXT: entry:
891 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
892 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
893 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
894 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
896 int16x8_t
test_vshlq_m_r_s16(int16x8_t a
, int32_t b
, mve_pred16_t p
)
899 return vshlq_m_r(a
, b
, p
);
900 #else /* POLYMORPHIC */
901 return vshlq_m_r_s16(a
, b
, p
);
902 #endif /* POLYMORPHIC */
905 // CHECK-LABEL: @test_vshlq_m_r_s32(
906 // CHECK-NEXT: entry:
907 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
908 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
909 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
910 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
912 int32x4_t
test_vshlq_m_r_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
915 return vshlq_m_r(a
, b
, p
);
916 #else /* POLYMORPHIC */
917 return vshlq_m_r_s32(a
, b
, p
);
918 #endif /* POLYMORPHIC */
921 // CHECK-LABEL: @test_vshlq_m_r_u8(
922 // CHECK-NEXT: entry:
923 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
924 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
925 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1, <16 x i1> [[TMP1]])
926 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
928 uint8x16_t
test_vshlq_m_r_u8(uint8x16_t a
, int32_t b
, mve_pred16_t p
)
931 return vshlq_m_r(a
, b
, p
);
932 #else /* POLYMORPHIC */
933 return vshlq_m_r_u8(a
, b
, p
);
934 #endif /* POLYMORPHIC */
937 // CHECK-LABEL: @test_vshlq_m_r_u16(
938 // CHECK-NEXT: entry:
939 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
940 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
941 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
942 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
944 uint16x8_t
test_vshlq_m_r_u16(uint16x8_t a
, int32_t b
, mve_pred16_t p
)
947 return vshlq_m_r(a
, b
, p
);
948 #else /* POLYMORPHIC */
949 return vshlq_m_r_u16(a
, b
, p
);
950 #endif /* POLYMORPHIC */
953 // CHECK-LABEL: @test_vshlq_m_r_u32(
954 // CHECK-NEXT: entry:
955 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
956 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
957 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
958 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
960 uint32x4_t
test_vshlq_m_r_u32(uint32x4_t a
, int32_t b
, mve_pred16_t p
)
963 return vshlq_m_r(a
, b
, p
);
964 #else /* POLYMORPHIC */
965 return vshlq_m_r_u32(a
, b
, p
);
966 #endif /* POLYMORPHIC */
969 // CHECK-LABEL: @test_vqshlq_m_s8(
970 // CHECK-NEXT: entry:
971 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
972 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
973 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
974 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
976 int8x16_t
test_vqshlq_m_s8(int8x16_t inactive
, int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
979 return vqshlq_m(inactive
, a
, b
, p
);
980 #else /* POLYMORPHIC */
981 return vqshlq_m_s8(inactive
, a
, b
, p
);
982 #endif /* POLYMORPHIC */
985 // CHECK-LABEL: @test_vqshlq_m_s16(
986 // CHECK-NEXT: entry:
987 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
988 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
989 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
990 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
992 int16x8_t
test_vqshlq_m_s16(int16x8_t inactive
, int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
995 return vqshlq_m(inactive
, a
, b
, p
);
996 #else /* POLYMORPHIC */
997 return vqshlq_m_s16(inactive
, a
, b
, p
);
998 #endif /* POLYMORPHIC */
1001 // CHECK-LABEL: @test_vqshlq_m_s32(
1002 // CHECK-NEXT: entry:
1003 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1004 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1005 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1006 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1008 int32x4_t
test_vqshlq_m_s32(int32x4_t inactive
, int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1011 return vqshlq_m(inactive
, a
, b
, p
);
1012 #else /* POLYMORPHIC */
1013 return vqshlq_m_s32(inactive
, a
, b
, p
);
1014 #endif /* POLYMORPHIC */
1017 // CHECK-LABEL: @test_vqshlq_m_u8(
1018 // CHECK-NEXT: entry:
1019 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1020 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1021 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 0, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
1022 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1024 uint8x16_t
test_vqshlq_m_u8(uint8x16_t inactive
, uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1027 return vqshlq_m(inactive
, a
, b
, p
);
1028 #else /* POLYMORPHIC */
1029 return vqshlq_m_u8(inactive
, a
, b
, p
);
1030 #endif /* POLYMORPHIC */
1033 // CHECK-LABEL: @test_vqshlq_m_u16(
1034 // CHECK-NEXT: entry:
1035 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1036 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1037 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
1038 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1040 uint16x8_t
test_vqshlq_m_u16(uint16x8_t inactive
, uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1043 return vqshlq_m(inactive
, a
, b
, p
);
1044 #else /* POLYMORPHIC */
1045 return vqshlq_m_u16(inactive
, a
, b
, p
);
1046 #endif /* POLYMORPHIC */
1049 // CHECK-LABEL: @test_vqshlq_m_u32(
1050 // CHECK-NEXT: entry:
1051 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1052 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1053 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 0, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1054 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1056 uint32x4_t
test_vqshlq_m_u32(uint32x4_t inactive
, uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1059 return vqshlq_m(inactive
, a
, b
, p
);
1060 #else /* POLYMORPHIC */
1061 return vqshlq_m_u32(inactive
, a
, b
, p
);
1062 #endif /* POLYMORPHIC */
1065 // CHECK-LABEL: @test_vqshlq_m_r_s8(
1066 // CHECK-NEXT: entry:
1067 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1068 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1069 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0, <16 x i1> [[TMP1]])
1070 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1072 int8x16_t
test_vqshlq_m_r_s8(int8x16_t a
, int32_t b
, mve_pred16_t p
)
1075 return vqshlq_m_r(a
, b
, p
);
1076 #else /* POLYMORPHIC */
1077 return vqshlq_m_r_s8(a
, b
, p
);
1078 #endif /* POLYMORPHIC */
1081 // CHECK-LABEL: @test_vqshlq_m_r_s16(
1082 // CHECK-NEXT: entry:
1083 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1084 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1085 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
1086 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1088 int16x8_t
test_vqshlq_m_r_s16(int16x8_t a
, int32_t b
, mve_pred16_t p
)
1091 return vqshlq_m_r(a
, b
, p
);
1092 #else /* POLYMORPHIC */
1093 return vqshlq_m_r_s16(a
, b
, p
);
1094 #endif /* POLYMORPHIC */
1097 // CHECK-LABEL: @test_vqshlq_m_r_s32(
1098 // CHECK-NEXT: entry:
1099 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1100 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1101 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
1102 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1104 int32x4_t
test_vqshlq_m_r_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
1107 return vqshlq_m_r(a
, b
, p
);
1108 #else /* POLYMORPHIC */
1109 return vqshlq_m_r_s32(a
, b
, p
);
1110 #endif /* POLYMORPHIC */
1113 // CHECK-LABEL: @test_vqshlq_m_r_u8(
1114 // CHECK-NEXT: entry:
1115 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1116 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1117 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1, <16 x i1> [[TMP1]])
1118 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1120 uint8x16_t
test_vqshlq_m_r_u8(uint8x16_t a
, int32_t b
, mve_pred16_t p
)
1123 return vqshlq_m_r(a
, b
, p
);
1124 #else /* POLYMORPHIC */
1125 return vqshlq_m_r_u8(a
, b
, p
);
1126 #endif /* POLYMORPHIC */
1129 // CHECK-LABEL: @test_vqshlq_m_r_u16(
1130 // CHECK-NEXT: entry:
1131 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1132 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1133 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
1134 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1136 uint16x8_t
test_vqshlq_m_r_u16(uint16x8_t a
, int32_t b
, mve_pred16_t p
)
1139 return vqshlq_m_r(a
, b
, p
);
1140 #else /* POLYMORPHIC */
1141 return vqshlq_m_r_u16(a
, b
, p
);
1142 #endif /* POLYMORPHIC */
1145 // CHECK-LABEL: @test_vqshlq_m_r_u32(
1146 // CHECK-NEXT: entry:
1147 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1148 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1149 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
1150 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1152 uint32x4_t
test_vqshlq_m_r_u32(uint32x4_t a
, int32_t b
, mve_pred16_t p
)
1155 return vqshlq_m_r(a
, b
, p
);
1156 #else /* POLYMORPHIC */
1157 return vqshlq_m_r_u32(a
, b
, p
);
1158 #endif /* POLYMORPHIC */
1161 // CHECK-LABEL: @test_vrshlq_m_s8(
1162 // CHECK-NEXT: entry:
1163 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1164 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1165 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
1166 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1168 int8x16_t
test_vrshlq_m_s8(int8x16_t inactive
, int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1171 return vrshlq_m(inactive
, a
, b
, p
);
1172 #else /* POLYMORPHIC */
1173 return vrshlq_m_s8(inactive
, a
, b
, p
);
1174 #endif /* POLYMORPHIC */
1177 // CHECK-LABEL: @test_vrshlq_m_s16(
1178 // CHECK-NEXT: entry:
1179 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1180 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1181 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
1182 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1184 int16x8_t
test_vrshlq_m_s16(int16x8_t inactive
, int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1187 return vrshlq_m(inactive
, a
, b
, p
);
1188 #else /* POLYMORPHIC */
1189 return vrshlq_m_s16(inactive
, a
, b
, p
);
1190 #endif /* POLYMORPHIC */
1193 // CHECK-LABEL: @test_vrshlq_m_s32(
1194 // CHECK-NEXT: entry:
1195 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1196 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1197 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1198 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1200 int32x4_t
test_vrshlq_m_s32(int32x4_t inactive
, int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1203 return vrshlq_m(inactive
, a
, b
, p
);
1204 #else /* POLYMORPHIC */
1205 return vrshlq_m_s32(inactive
, a
, b
, p
);
1206 #endif /* POLYMORPHIC */
1209 // CHECK-LABEL: @test_vrshlq_m_u8(
1210 // CHECK-NEXT: entry:
1211 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1212 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1213 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
1214 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1216 uint8x16_t
test_vrshlq_m_u8(uint8x16_t inactive
, uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1219 return vrshlq_m(inactive
, a
, b
, p
);
1220 #else /* POLYMORPHIC */
1221 return vrshlq_m_u8(inactive
, a
, b
, p
);
1222 #endif /* POLYMORPHIC */
1225 // CHECK-LABEL: @test_vrshlq_m_u16(
1226 // CHECK-NEXT: entry:
1227 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1228 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1229 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
1230 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1232 uint16x8_t
test_vrshlq_m_u16(uint16x8_t inactive
, uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1235 return vrshlq_m(inactive
, a
, b
, p
);
1236 #else /* POLYMORPHIC */
1237 return vrshlq_m_u16(inactive
, a
, b
, p
);
1238 #endif /* POLYMORPHIC */
1241 // CHECK-LABEL: @test_vrshlq_m_u32(
1242 // CHECK-NEXT: entry:
1243 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1244 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1245 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1246 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1248 uint32x4_t
test_vrshlq_m_u32(uint32x4_t inactive
, uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1251 return vrshlq_m(inactive
, a
, b
, p
);
1252 #else /* POLYMORPHIC */
1253 return vrshlq_m_u32(inactive
, a
, b
, p
);
1254 #endif /* POLYMORPHIC */
1257 // CHECK-LABEL: @test_vrshlq_x_s8(
1258 // CHECK-NEXT: entry:
1259 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1260 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1261 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 0, <16 x i1> [[TMP1]], <16 x i8> undef)
1262 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1264 int8x16_t
test_vrshlq_x_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1267 return vrshlq_x(a
, b
, p
);
1268 #else /* POLYMORPHIC */
1269 return vrshlq_x_s8(a
, b
, p
);
1270 #endif /* POLYMORPHIC */
1273 // CHECK-LABEL: @test_vrshlq_x_s16(
1274 // CHECK-NEXT: entry:
1275 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1276 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1277 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> undef)
1278 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1280 int16x8_t
test_vrshlq_x_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1283 return vrshlq_x(a
, b
, p
);
1284 #else /* POLYMORPHIC */
1285 return vrshlq_x_s16(a
, b
, p
);
1286 #endif /* POLYMORPHIC */
1289 // CHECK-LABEL: @test_vrshlq_x_s32(
1290 // CHECK-NEXT: entry:
1291 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1292 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1293 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> undef)
1294 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1296 int32x4_t
test_vrshlq_x_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1299 return vrshlq_x(a
, b
, p
);
1300 #else /* POLYMORPHIC */
1301 return vrshlq_x_s32(a
, b
, p
);
1302 #endif /* POLYMORPHIC */
1305 // CHECK-LABEL: @test_vrshlq_x_u8(
1306 // CHECK-NEXT: entry:
1307 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1308 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1309 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 0, i32 1, i32 1, <16 x i1> [[TMP1]], <16 x i8> undef)
1310 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1312 uint8x16_t
test_vrshlq_x_u8(uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1315 return vrshlq_x(a
, b
, p
);
1316 #else /* POLYMORPHIC */
1317 return vrshlq_x_u8(a
, b
, p
);
1318 #endif /* POLYMORPHIC */
1321 // CHECK-LABEL: @test_vrshlq_x_u16(
1322 // CHECK-NEXT: entry:
1323 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1324 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1325 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 0, i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> undef)
1326 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1328 uint16x8_t
test_vrshlq_x_u16(uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1331 return vrshlq_x(a
, b
, p
);
1332 #else /* POLYMORPHIC */
1333 return vrshlq_x_u16(a
, b
, p
);
1334 #endif /* POLYMORPHIC */
1337 // CHECK-LABEL: @test_vrshlq_x_u32(
1338 // CHECK-NEXT: entry:
1339 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1340 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1341 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> undef)
1342 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1344 uint32x4_t
test_vrshlq_x_u32(uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1347 return vrshlq_x(a
, b
, p
);
1348 #else /* POLYMORPHIC */
1349 return vrshlq_x_u32(a
, b
, p
);
1350 #endif /* POLYMORPHIC */
1353 // CHECK-LABEL: @test_vrshlq_m_n_s8(
1354 // CHECK-NEXT: entry:
1355 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1356 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1357 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0, <16 x i1> [[TMP1]])
1358 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1360 int8x16_t
test_vrshlq_m_n_s8(int8x16_t a
, int32_t b
, mve_pred16_t p
)
1363 return vrshlq_m_n(a
, b
, p
);
1364 #else /* POLYMORPHIC */
1365 return vrshlq_m_n_s8(a
, b
, p
);
1366 #endif /* POLYMORPHIC */
1369 // CHECK-LABEL: @test_vrshlq_m_n_s16(
1370 // CHECK-NEXT: entry:
1371 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1372 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1373 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0, <8 x i1> [[TMP1]])
1374 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1376 int16x8_t
test_vrshlq_m_n_s16(int16x8_t a
, int32_t b
, mve_pred16_t p
)
1379 return vrshlq_m_n(a
, b
, p
);
1380 #else /* POLYMORPHIC */
1381 return vrshlq_m_n_s16(a
, b
, p
);
1382 #endif /* POLYMORPHIC */
1385 // CHECK-LABEL: @test_vrshlq_m_n_s32(
1386 // CHECK-NEXT: entry:
1387 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1388 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1389 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 0, <4 x i1> [[TMP1]])
1390 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1392 int32x4_t
test_vrshlq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
1395 return vrshlq_m_n(a
, b
, p
);
1396 #else /* POLYMORPHIC */
1397 return vrshlq_m_n_s32(a
, b
, p
);
1398 #endif /* POLYMORPHIC */
1401 // CHECK-LABEL: @test_vrshlq_m_n_u8(
1402 // CHECK-NEXT: entry:
1403 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1404 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1405 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1, <16 x i1> [[TMP1]])
1406 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1408 uint8x16_t
test_vrshlq_m_n_u8(uint8x16_t a
, int32_t b
, mve_pred16_t p
)
1411 return vrshlq_m_n(a
, b
, p
);
1412 #else /* POLYMORPHIC */
1413 return vrshlq_m_n_u8(a
, b
, p
);
1414 #endif /* POLYMORPHIC */
1417 // CHECK-LABEL: @test_vrshlq_m_n_u16(
1418 // CHECK-NEXT: entry:
1419 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1420 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1421 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1, <8 x i1> [[TMP1]])
1422 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1424 uint16x8_t
test_vrshlq_m_n_u16(uint16x8_t a
, int32_t b
, mve_pred16_t p
)
1427 return vrshlq_m_n(a
, b
, p
);
1428 #else /* POLYMORPHIC */
1429 return vrshlq_m_n_u16(a
, b
, p
);
1430 #endif /* POLYMORPHIC */
1433 // CHECK-LABEL: @test_vrshlq_m_n_u32(
1434 // CHECK-NEXT: entry:
1435 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1436 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1437 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 0, i32 1, i32 1, <4 x i1> [[TMP1]])
1438 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1440 uint32x4_t
test_vrshlq_m_n_u32(uint32x4_t a
, int32_t b
, mve_pred16_t p
)
1443 return vrshlq_m_n(a
, b
, p
);
1444 #else /* POLYMORPHIC */
1445 return vrshlq_m_n_u32(a
, b
, p
);
1446 #endif /* POLYMORPHIC */
1449 // CHECK-LABEL: @test_vqrshlq_m_s8(
1450 // CHECK-NEXT: entry:
1451 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1452 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1453 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 0, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
1454 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1456 int8x16_t
test_vqrshlq_m_s8(int8x16_t inactive
, int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1459 return vqrshlq_m(inactive
, a
, b
, p
);
1460 #else /* POLYMORPHIC */
1461 return vqrshlq_m_s8(inactive
, a
, b
, p
);
1462 #endif /* POLYMORPHIC */
1465 // CHECK-LABEL: @test_vqrshlq_m_s16(
1466 // CHECK-NEXT: entry:
1467 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1468 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1469 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
1470 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1472 int16x8_t
test_vqrshlq_m_s16(int16x8_t inactive
, int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1475 return vqrshlq_m(inactive
, a
, b
, p
);
1476 #else /* POLYMORPHIC */
1477 return vqrshlq_m_s16(inactive
, a
, b
, p
);
1478 #endif /* POLYMORPHIC */
1481 // CHECK-LABEL: @test_vqrshlq_m_s32(
1482 // CHECK-NEXT: entry:
1483 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1484 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1485 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1486 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1488 int32x4_t
test_vqrshlq_m_s32(int32x4_t inactive
, int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1491 return vqrshlq_m(inactive
, a
, b
, p
);
1492 #else /* POLYMORPHIC */
1493 return vqrshlq_m_s32(inactive
, a
, b
, p
);
1494 #endif /* POLYMORPHIC */
1497 // CHECK-LABEL: @test_vqrshlq_m_u8(
1498 // CHECK-NEXT: entry:
1499 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1500 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1501 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.vector.predicated.v16i8.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1, i32 1, i32 1, <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
1502 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1504 uint8x16_t
test_vqrshlq_m_u8(uint8x16_t inactive
, uint8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1507 return vqrshlq_m(inactive
, a
, b
, p
);
1508 #else /* POLYMORPHIC */
1509 return vqrshlq_m_u8(inactive
, a
, b
, p
);
1510 #endif /* POLYMORPHIC */
1513 // CHECK-LABEL: @test_vqrshlq_m_u16(
1514 // CHECK-NEXT: entry:
1515 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1516 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1517 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.vector.predicated.v8i16.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
1518 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1520 uint16x8_t
test_vqrshlq_m_u16(uint16x8_t inactive
, uint16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1523 return vqrshlq_m(inactive
, a
, b
, p
);
1524 #else /* POLYMORPHIC */
1525 return vqrshlq_m_u16(inactive
, a
, b
, p
);
1526 #endif /* POLYMORPHIC */
1529 // CHECK-LABEL: @test_vqrshlq_m_u32(
1530 // CHECK-NEXT: entry:
1531 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1532 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1533 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.vector.predicated.v4i32.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
1534 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1536 uint32x4_t
test_vqrshlq_m_u32(uint32x4_t inactive
, uint32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1539 return vqrshlq_m(inactive
, a
, b
, p
);
1540 #else /* POLYMORPHIC */
1541 return vqrshlq_m_u32(inactive
, a
, b
, p
);
1542 #endif /* POLYMORPHIC */
1545 // CHECK-LABEL: @test_vqrshlq_m_n_s8(
1546 // CHECK-NEXT: entry:
1547 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1548 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1549 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0, <16 x i1> [[TMP1]])
1550 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1552 int8x16_t
test_vqrshlq_m_n_s8(int8x16_t a
, int32_t b
, mve_pred16_t p
)
1555 return vqrshlq_m_n(a
, b
, p
);
1556 #else /* POLYMORPHIC */
1557 return vqrshlq_m_n_s8(a
, b
, p
);
1558 #endif /* POLYMORPHIC */
1561 // CHECK-LABEL: @test_vqrshlq_m_n_s16(
1562 // CHECK-NEXT: entry:
1563 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1564 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1565 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
1566 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1568 int16x8_t
test_vqrshlq_m_n_s16(int16x8_t a
, int32_t b
, mve_pred16_t p
)
1571 return vqrshlq_m_n(a
, b
, p
);
1572 #else /* POLYMORPHIC */
1573 return vqrshlq_m_n_s16(a
, b
, p
);
1574 #endif /* POLYMORPHIC */
1577 // CHECK-LABEL: @test_vqrshlq_m_n_s32(
1578 // CHECK-NEXT: entry:
1579 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1580 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1581 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
1582 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1584 int32x4_t
test_vqrshlq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
1587 return vqrshlq_m_n(a
, b
, p
);
1588 #else /* POLYMORPHIC */
1589 return vqrshlq_m_n_s32(a
, b
, p
);
1590 #endif /* POLYMORPHIC */
1593 // CHECK-LABEL: @test_vqrshlq_m_n_u8(
1594 // CHECK-NEXT: entry:
1595 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1596 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1597 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshl.scalar.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1, <16 x i1> [[TMP1]])
1598 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1600 uint8x16_t
test_vqrshlq_m_n_u8(uint8x16_t a
, int32_t b
, mve_pred16_t p
)
1603 return vqrshlq_m_n(a
, b
, p
);
1604 #else /* POLYMORPHIC */
1605 return vqrshlq_m_n_u8(a
, b
, p
);
1606 #endif /* POLYMORPHIC */
1609 // CHECK-LABEL: @test_vqrshlq_m_n_u16(
1610 // CHECK-NEXT: entry:
1611 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1612 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1613 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshl.scalar.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
1614 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1616 uint16x8_t
test_vqrshlq_m_n_u16(uint16x8_t a
, int32_t b
, mve_pred16_t p
)
1619 return vqrshlq_m_n(a
, b
, p
);
1620 #else /* POLYMORPHIC */
1621 return vqrshlq_m_n_u16(a
, b
, p
);
1622 #endif /* POLYMORPHIC */
1625 // CHECK-LABEL: @test_vqrshlq_m_n_u32(
1626 // CHECK-NEXT: entry:
1627 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1628 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1629 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vshl.scalar.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], i32 [[B:%.*]], i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
1630 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1632 uint32x4_t
test_vqrshlq_m_n_u32(uint32x4_t a
, int32_t b
, mve_pred16_t p
)
1635 return vqrshlq_m_n(a
, b
, p
);
1636 #else /* POLYMORPHIC */
1637 return vqrshlq_m_n_u32(a
, b
, p
);
1638 #endif /* POLYMORPHIC */