// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vshrnbq_n_s16(
11 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 0, i32 0, i32 0, i32 0, i32 0)
12 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
14 int8x16_t
test_vshrnbq_n_s16(int8x16_t a
, int16x8_t b
)
17 return vshrnbq(a
, b
, 3);
18 #else /* POLYMORPHIC */
19 return vshrnbq_n_s16(a
, b
, 3);
20 #endif /* POLYMORPHIC */
23 // CHECK-LABEL: @test_vshrnbq_n_s32(
25 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 9, i32 0, i32 0, i32 0, i32 0, i32 0)
26 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
28 int16x8_t
test_vshrnbq_n_s32(int16x8_t a
, int32x4_t b
)
31 return vshrnbq(a
, b
, 9);
32 #else /* POLYMORPHIC */
33 return vshrnbq_n_s32(a
, b
, 9);
34 #endif /* POLYMORPHIC */
37 // CHECK-LABEL: @test_vshrnbq_n_u16(
39 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, i32 1, i32 1, i32 0)
40 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
42 uint8x16_t
test_vshrnbq_n_u16(uint8x16_t a
, uint16x8_t b
)
45 return vshrnbq(a
, b
, 1);
46 #else /* POLYMORPHIC */
47 return vshrnbq_n_u16(a
, b
, 1);
48 #endif /* POLYMORPHIC */
51 // CHECK-LABEL: @test_vshrnbq_n_u32(
53 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 3, i32 0, i32 0, i32 1, i32 1, i32 0)
54 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
56 uint16x8_t
test_vshrnbq_n_u32(uint16x8_t a
, uint32x4_t b
)
59 return vshrnbq(a
, b
, 3);
60 #else /* POLYMORPHIC */
61 return vshrnbq_n_u32(a
, b
, 3);
62 #endif /* POLYMORPHIC */
65 // CHECK-LABEL: @test_vshrntq_n_s16(
67 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, i32 0, i32 0, i32 1)
68 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
70 int8x16_t
test_vshrntq_n_s16(int8x16_t a
, int16x8_t b
)
73 return vshrntq(a
, b
, 1);
74 #else /* POLYMORPHIC */
75 return vshrntq_n_s16(a
, b
, 1);
76 #endif /* POLYMORPHIC */
79 // CHECK-LABEL: @test_vshrntq_n_s32(
81 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 0, i32 0, i32 0, i32 0, i32 1)
82 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
84 int16x8_t
test_vshrntq_n_s32(int16x8_t a
, int32x4_t b
)
87 return vshrntq(a
, b
, 10);
88 #else /* POLYMORPHIC */
89 return vshrntq_n_s32(a
, b
, 10);
90 #endif /* POLYMORPHIC */
93 // CHECK-LABEL: @test_vshrntq_n_u16(
95 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 6, i32 0, i32 0, i32 1, i32 1, i32 1)
96 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
98 uint8x16_t
test_vshrntq_n_u16(uint8x16_t a
, uint16x8_t b
)
101 return vshrntq(a
, b
, 6);
102 #else /* POLYMORPHIC */
103 return vshrntq_n_u16(a
, b
, 6);
104 #endif /* POLYMORPHIC */
107 // CHECK-LABEL: @test_vshrntq_n_u32(
108 // CHECK-NEXT: entry:
109 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 0, i32 0, i32 1, i32 1, i32 1)
110 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
112 uint16x8_t
test_vshrntq_n_u32(uint16x8_t a
, uint32x4_t b
)
115 return vshrntq(a
, b
, 10);
116 #else /* POLYMORPHIC */
117 return vshrntq_n_u32(a
, b
, 10);
118 #endif /* POLYMORPHIC */
121 // CHECK-LABEL: @test_vshrnbq_m_n_s16(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
124 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
125 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
126 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
128 int8x16_t
test_vshrnbq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
131 return vshrnbq_m(a
, b
, 4, p
);
132 #else /* POLYMORPHIC */
133 return vshrnbq_m_n_s16(a
, b
, 4, p
);
134 #endif /* POLYMORPHIC */
137 // CHECK-LABEL: @test_vshrnbq_m_n_s32(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
140 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
141 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 0, i32 0, i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
142 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
144 int16x8_t
test_vshrnbq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
147 return vshrnbq_m(a
, b
, 13, p
);
148 #else /* POLYMORPHIC */
149 return vshrnbq_m_n_s32(a
, b
, 13, p
);
150 #endif /* POLYMORPHIC */
153 // CHECK-LABEL: @test_vshrnbq_m_n_u16(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
156 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
157 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 0, i32 0, i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
158 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
160 uint8x16_t
test_vshrnbq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
163 return vshrnbq_m(a
, b
, 7, p
);
164 #else /* POLYMORPHIC */
165 return vshrnbq_m_n_u16(a
, b
, 7, p
);
166 #endif /* POLYMORPHIC */
169 // CHECK-LABEL: @test_vshrnbq_m_n_u32(
170 // CHECK-NEXT: entry:
171 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
172 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
173 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 15, i32 0, i32 0, i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
174 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
176 uint16x8_t
test_vshrnbq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
179 return vshrnbq_m(a
, b
, 15, p
);
180 #else /* POLYMORPHIC */
181 return vshrnbq_m_n_u32(a
, b
, 15, p
);
182 #endif /* POLYMORPHIC */
185 // CHECK-LABEL: @test_vshrntq_m_n_s16(
186 // CHECK-NEXT: entry:
187 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
188 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
189 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 6, i32 0, i32 0, i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
190 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
192 int8x16_t
test_vshrntq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
195 return vshrntq_m(a
, b
, 6, p
);
196 #else /* POLYMORPHIC */
197 return vshrntq_m_n_s16(a
, b
, 6, p
);
198 #endif /* POLYMORPHIC */
201 // CHECK-LABEL: @test_vshrntq_m_n_s32(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
204 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
205 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 0, i32 0, i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
206 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
208 int16x8_t
test_vshrntq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
211 return vshrntq_m(a
, b
, 13, p
);
212 #else /* POLYMORPHIC */
213 return vshrntq_m_n_s32(a
, b
, 13, p
);
214 #endif /* POLYMORPHIC */
217 // CHECK-LABEL: @test_vshrntq_m_n_u16(
218 // CHECK-NEXT: entry:
219 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
220 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
221 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 0, i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
222 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
224 uint8x16_t
test_vshrntq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
227 return vshrntq_m(a
, b
, 1, p
);
228 #else /* POLYMORPHIC */
229 return vshrntq_m_n_u16(a
, b
, 1, p
);
230 #endif /* POLYMORPHIC */
233 // CHECK-LABEL: @test_vshrntq_m_n_u32(
234 // CHECK-NEXT: entry:
235 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
236 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
237 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 0, i32 0, i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
238 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
240 uint16x8_t
test_vshrntq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
243 return vshrntq_m(a
, b
, 10, p
);
244 #else /* POLYMORPHIC */
245 return vshrntq_m_n_u32(a
, b
, 10, p
);
246 #endif /* POLYMORPHIC */
249 // CHECK-LABEL: @test_vrshrnbq_n_s16(
250 // CHECK-NEXT: entry:
251 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 5, i32 0, i32 1, i32 0, i32 0, i32 0)
252 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
254 int8x16_t
test_vrshrnbq_n_s16(int8x16_t a
, int16x8_t b
)
257 return vrshrnbq(a
, b
, 5);
258 #else /* POLYMORPHIC */
259 return vrshrnbq_n_s16(a
, b
, 5);
260 #endif /* POLYMORPHIC */
263 // CHECK-LABEL: @test_vrshrnbq_n_s32(
264 // CHECK-NEXT: entry:
265 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 0, i32 1, i32 0, i32 0, i32 0)
266 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
268 int16x8_t
test_vrshrnbq_n_s32(int16x8_t a
, int32x4_t b
)
271 return vrshrnbq(a
, b
, 10);
272 #else /* POLYMORPHIC */
273 return vrshrnbq_n_s32(a
, b
, 10);
274 #endif /* POLYMORPHIC */
277 // CHECK-LABEL: @test_vrshrnbq_n_u16(
278 // CHECK-NEXT: entry:
279 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 2, i32 0, i32 1, i32 1, i32 1, i32 0)
280 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
282 uint8x16_t
test_vrshrnbq_n_u16(uint8x16_t a
, uint16x8_t b
)
285 return vrshrnbq(a
, b
, 2);
286 #else /* POLYMORPHIC */
287 return vrshrnbq_n_u16(a
, b
, 2);
288 #endif /* POLYMORPHIC */
291 // CHECK-LABEL: @test_vrshrnbq_n_u32(
292 // CHECK-NEXT: entry:
293 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 12, i32 0, i32 1, i32 1, i32 1, i32 0)
294 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
296 uint16x8_t
test_vrshrnbq_n_u32(uint16x8_t a
, uint32x4_t b
)
299 return vrshrnbq(a
, b
, 12);
300 #else /* POLYMORPHIC */
301 return vrshrnbq_n_u32(a
, b
, 12);
302 #endif /* POLYMORPHIC */
305 // CHECK-LABEL: @test_vrshrntq_n_s16(
306 // CHECK-NEXT: entry:
307 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 0, i32 1, i32 0, i32 0, i32 1)
308 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
310 int8x16_t
test_vrshrntq_n_s16(int8x16_t a
, int16x8_t b
)
313 return vrshrntq(a
, b
, 4);
314 #else /* POLYMORPHIC */
315 return vrshrntq_n_s16(a
, b
, 4);
316 #endif /* POLYMORPHIC */
319 // CHECK-LABEL: @test_vrshrntq_n_s32(
320 // CHECK-NEXT: entry:
321 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 11, i32 0, i32 1, i32 0, i32 0, i32 1)
322 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
324 int16x8_t
test_vrshrntq_n_s32(int16x8_t a
, int32x4_t b
)
327 return vrshrntq(a
, b
, 11);
328 #else /* POLYMORPHIC */
329 return vrshrntq_n_s32(a
, b
, 11);
330 #endif /* POLYMORPHIC */
333 // CHECK-LABEL: @test_vrshrntq_n_u16(
334 // CHECK-NEXT: entry:
335 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, i32 1, i32 1, i32 1)
336 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
338 uint8x16_t
test_vrshrntq_n_u16(uint8x16_t a
, uint16x8_t b
)
341 return vrshrntq(a
, b
, 1);
342 #else /* POLYMORPHIC */
343 return vrshrntq_n_u16(a
, b
, 1);
344 #endif /* POLYMORPHIC */
347 // CHECK-LABEL: @test_vrshrntq_n_u32(
348 // CHECK-NEXT: entry:
349 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 6, i32 0, i32 1, i32 1, i32 1, i32 1)
350 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
352 uint16x8_t
test_vrshrntq_n_u32(uint16x8_t a
, uint32x4_t b
)
355 return vrshrntq(a
, b
, 6);
356 #else /* POLYMORPHIC */
357 return vrshrntq_n_u32(a
, b
, 6);
358 #endif /* POLYMORPHIC */
361 // CHECK-LABEL: @test_vrshrnbq_m_n_s16(
362 // CHECK-NEXT: entry:
363 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
364 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
365 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 0, i32 1, i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
366 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
368 int8x16_t
test_vrshrnbq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
371 return vrshrnbq_m(a
, b
, 1, p
);
372 #else /* POLYMORPHIC */
373 return vrshrnbq_m_n_s16(a
, b
, 1, p
);
374 #endif /* POLYMORPHIC */
377 // CHECK-LABEL: @test_vrshrnbq_m_n_s32(
378 // CHECK-NEXT: entry:
379 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
380 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
381 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 14, i32 0, i32 1, i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
382 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
384 int16x8_t
test_vrshrnbq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
387 return vrshrnbq_m(a
, b
, 14, p
);
388 #else /* POLYMORPHIC */
389 return vrshrnbq_m_n_s32(a
, b
, 14, p
);
390 #endif /* POLYMORPHIC */
393 // CHECK-LABEL: @test_vrshrnbq_m_n_u16(
394 // CHECK-NEXT: entry:
395 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
396 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
397 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 2, i32 0, i32 1, i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
398 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
400 uint8x16_t
test_vrshrnbq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
403 return vrshrnbq_m(a
, b
, 2, p
);
404 #else /* POLYMORPHIC */
405 return vrshrnbq_m_n_u16(a
, b
, 2, p
);
406 #endif /* POLYMORPHIC */
409 // CHECK-LABEL: @test_vrshrnbq_m_n_u32(
410 // CHECK-NEXT: entry:
411 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
412 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
413 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 12, i32 0, i32 1, i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
414 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
416 uint16x8_t
test_vrshrnbq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
419 return vrshrnbq_m(a
, b
, 12, p
);
420 #else /* POLYMORPHIC */
421 return vrshrnbq_m_n_u32(a
, b
, 12, p
);
422 #endif /* POLYMORPHIC */
425 // CHECK-LABEL: @test_vrshrntq_m_n_s16(
426 // CHECK-NEXT: entry:
427 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
428 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
429 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 0, i32 1, i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
430 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
432 int8x16_t
test_vrshrntq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
435 return vrshrntq_m(a
, b
, 4, p
);
436 #else /* POLYMORPHIC */
437 return vrshrntq_m_n_s16(a
, b
, 4, p
);
438 #endif /* POLYMORPHIC */
441 // CHECK-LABEL: @test_vrshrntq_m_n_s32(
442 // CHECK-NEXT: entry:
443 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
444 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
445 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 6, i32 0, i32 1, i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
446 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
448 int16x8_t
test_vrshrntq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
451 return vrshrntq_m(a
, b
, 6, p
);
452 #else /* POLYMORPHIC */
453 return vrshrntq_m_n_s32(a
, b
, 6, p
);
454 #endif /* POLYMORPHIC */
457 // CHECK-LABEL: @test_vrshrntq_m_n_u16(
458 // CHECK-NEXT: entry:
459 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
460 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
461 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 6, i32 0, i32 1, i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
462 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
464 uint8x16_t
test_vrshrntq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
467 return vrshrntq_m(a
, b
, 6, p
);
468 #else /* POLYMORPHIC */
469 return vrshrntq_m_n_u16(a
, b
, 6, p
);
470 #endif /* POLYMORPHIC */
473 // CHECK-LABEL: @test_vrshrntq_m_n_u32(
474 // CHECK-NEXT: entry:
475 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
476 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
477 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 0, i32 1, i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
478 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
480 uint16x8_t
test_vrshrntq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
483 return vrshrntq_m(a
, b
, 10, p
);
484 #else /* POLYMORPHIC */
485 return vrshrntq_m_n_u32(a
, b
, 10, p
);
486 #endif /* POLYMORPHIC */
489 // CHECK-LABEL: @test_vqshrnbq_n_s16(
490 // CHECK-NEXT: entry:
491 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 0, i32 0, i32 0, i32 0)
492 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
494 int8x16_t
test_vqshrnbq_n_s16(int8x16_t a
, int16x8_t b
)
497 return vqshrnbq(a
, b
, 7);
498 #else /* POLYMORPHIC */
499 return vqshrnbq_n_s16(a
, b
, 7);
500 #endif /* POLYMORPHIC */
503 // CHECK-LABEL: @test_vqshrnbq_n_s32(
504 // CHECK-NEXT: entry:
505 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 15, i32 1, i32 0, i32 0, i32 0, i32 0)
506 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
508 int16x8_t
test_vqshrnbq_n_s32(int16x8_t a
, int32x4_t b
)
511 return vqshrnbq(a
, b
, 15);
512 #else /* POLYMORPHIC */
513 return vqshrnbq_n_s32(a
, b
, 15);
514 #endif /* POLYMORPHIC */
517 // CHECK-LABEL: @test_vqshrnbq_n_u16(
518 // CHECK-NEXT: entry:
519 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 1, i32 0, i32 1, i32 1, i32 0)
520 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
522 uint8x16_t
test_vqshrnbq_n_u16(uint8x16_t a
, uint16x8_t b
)
525 return vqshrnbq(a
, b
, 3);
526 #else /* POLYMORPHIC */
527 return vqshrnbq_n_u16(a
, b
, 3);
528 #endif /* POLYMORPHIC */
531 // CHECK-LABEL: @test_vqshrnbq_n_u32(
532 // CHECK-NEXT: entry:
533 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 3, i32 1, i32 0, i32 1, i32 1, i32 0)
534 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
536 uint16x8_t
test_vqshrnbq_n_u32(uint16x8_t a
, uint32x4_t b
)
539 return vqshrnbq(a
, b
, 3);
540 #else /* POLYMORPHIC */
541 return vqshrnbq_n_u32(a
, b
, 3);
542 #endif /* POLYMORPHIC */
545 // CHECK-LABEL: @test_vqshrntq_n_s16(
546 // CHECK-NEXT: entry:
547 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 5, i32 1, i32 0, i32 0, i32 0, i32 1)
548 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
550 int8x16_t
test_vqshrntq_n_s16(int8x16_t a
, int16x8_t b
)
553 return vqshrntq(a
, b
, 5);
554 #else /* POLYMORPHIC */
555 return vqshrntq_n_s16(a
, b
, 5);
556 #endif /* POLYMORPHIC */
559 // CHECK-LABEL: @test_vqshrntq_n_s32(
560 // CHECK-NEXT: entry:
561 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 6, i32 1, i32 0, i32 0, i32 0, i32 1)
562 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
564 int16x8_t
test_vqshrntq_n_s32(int16x8_t a
, int32x4_t b
)
567 return vqshrntq(a
, b
, 6);
568 #else /* POLYMORPHIC */
569 return vqshrntq_n_s32(a
, b
, 6);
570 #endif /* POLYMORPHIC */
573 // CHECK-LABEL: @test_vqshrntq_n_u16(
574 // CHECK-NEXT: entry:
575 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, i32 1, i32 1, i32 1)
576 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
578 uint8x16_t
test_vqshrntq_n_u16(uint8x16_t a
, uint16x8_t b
)
581 return vqshrntq(a
, b
, 1);
582 #else /* POLYMORPHIC */
583 return vqshrntq_n_u16(a
, b
, 1);
584 #endif /* POLYMORPHIC */
587 // CHECK-LABEL: @test_vqshrntq_n_u32(
588 // CHECK-NEXT: entry:
589 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 15, i32 1, i32 0, i32 1, i32 1, i32 1)
590 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
592 uint16x8_t
test_vqshrntq_n_u32(uint16x8_t a
, uint32x4_t b
)
595 return vqshrntq(a
, b
, 15);
596 #else /* POLYMORPHIC */
597 return vqshrntq_n_u32(a
, b
, 15);
598 #endif /* POLYMORPHIC */
601 // CHECK-LABEL: @test_vqshrnbq_m_n_s16(
602 // CHECK-NEXT: entry:
603 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
604 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
605 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 0, i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
606 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
608 int8x16_t
test_vqshrnbq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
611 return vqshrnbq_m(a
, b
, 7, p
);
612 #else /* POLYMORPHIC */
613 return vqshrnbq_m_n_s16(a
, b
, 7, p
);
614 #endif /* POLYMORPHIC */
617 // CHECK-LABEL: @test_vqshrnbq_m_n_s32(
618 // CHECK-NEXT: entry:
619 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
620 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
621 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
622 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
624 int16x8_t
test_vqshrnbq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
627 return vqshrnbq_m(a
, b
, 1, p
);
628 #else /* POLYMORPHIC */
629 return vqshrnbq_m_n_s32(a
, b
, 1, p
);
630 #endif /* POLYMORPHIC */
633 // CHECK-LABEL: @test_vqshrnbq_m_n_u16(
634 // CHECK-NEXT: entry:
635 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
636 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
637 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
638 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
640 uint8x16_t
test_vqshrnbq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
643 return vqshrnbq_m(a
, b
, 1, p
);
644 #else /* POLYMORPHIC */
645 return vqshrnbq_m_n_u16(a
, b
, 1, p
);
646 #endif /* POLYMORPHIC */
649 // CHECK-LABEL: @test_vqshrnbq_m_n_u32(
650 // CHECK-NEXT: entry:
651 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
652 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
653 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 8, i32 1, i32 0, i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
654 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
656 uint16x8_t
test_vqshrnbq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
659 return vqshrnbq_m(a
, b
, 8, p
);
660 #else /* POLYMORPHIC */
661 return vqshrnbq_m_n_u32(a
, b
, 8, p
);
662 #endif /* POLYMORPHIC */
665 // CHECK-LABEL: @test_vqshrntq_m_n_s16(
666 // CHECK-NEXT: entry:
667 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
668 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
669 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 0, i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
670 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
672 int8x16_t
test_vqshrntq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
675 return vqshrntq_m(a
, b
, 1, p
);
676 #else /* POLYMORPHIC */
677 return vqshrntq_m_n_s16(a
, b
, 1, p
);
678 #endif /* POLYMORPHIC */
681 // CHECK-LABEL: @test_vqshrntq_m_n_s32(
682 // CHECK-NEXT: entry:
683 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
684 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
685 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 11, i32 1, i32 0, i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
686 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
688 int16x8_t
test_vqshrntq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
691 return vqshrntq_m(a
, b
, 11, p
);
692 #else /* POLYMORPHIC */
693 return vqshrntq_m_n_s32(a
, b
, 11, p
);
694 #endif /* POLYMORPHIC */
697 // CHECK-LABEL: @test_vqshrntq_m_n_u16(
698 // CHECK-NEXT: entry:
699 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
700 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
701 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 1, i32 0, i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
702 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
704 uint8x16_t
test_vqshrntq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
707 return vqshrntq_m(a
, b
, 3, p
);
708 #else /* POLYMORPHIC */
709 return vqshrntq_m_n_u16(a
, b
, 3, p
);
710 #endif /* POLYMORPHIC */
713 // CHECK-LABEL: @test_vqshrntq_m_n_u32(
714 // CHECK-NEXT: entry:
715 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
716 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
717 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 0, i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
718 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
720 uint16x8_t
test_vqshrntq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
723 return vqshrntq_m(a
, b
, 1, p
);
724 #else /* POLYMORPHIC */
725 return vqshrntq_m_n_u32(a
, b
, 1, p
);
726 #endif /* POLYMORPHIC */
729 // CHECK-LABEL: @test_vqshrunbq_n_s16(
730 // CHECK-NEXT: entry:
731 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 5, i32 1, i32 0, i32 1, i32 0, i32 0)
732 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
734 uint8x16_t
test_vqshrunbq_n_s16(uint8x16_t a
, int16x8_t b
)
737 return vqshrunbq(a
, b
, 5);
738 #else /* POLYMORPHIC */
739 return vqshrunbq_n_s16(a
, b
, 5);
740 #endif /* POLYMORPHIC */
743 // CHECK-LABEL: @test_vqshrunbq_n_s32(
744 // CHECK-NEXT: entry:
745 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 1, i32 0, i32 1, i32 0, i32 0)
746 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
748 uint16x8_t
test_vqshrunbq_n_s32(uint16x8_t a
, int32x4_t b
)
751 return vqshrunbq(a
, b
, 13);
752 #else /* POLYMORPHIC */
753 return vqshrunbq_n_s32(a
, b
, 13);
754 #endif /* POLYMORPHIC */
757 // CHECK-LABEL: @test_vqshruntq_n_s16(
758 // CHECK-NEXT: entry:
759 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 2, i32 1, i32 0, i32 1, i32 0, i32 1)
760 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
762 uint8x16_t
test_vqshruntq_n_s16(uint8x16_t a
, int16x8_t b
)
765 return vqshruntq(a
, b
, 2);
766 #else /* POLYMORPHIC */
767 return vqshruntq_n_s16(a
, b
, 2);
768 #endif /* POLYMORPHIC */
771 // CHECK-LABEL: @test_vqshruntq_n_s32(
772 // CHECK-NEXT: entry:
773 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 1)
774 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
776 uint16x8_t
test_vqshruntq_n_s32(uint16x8_t a
, int32x4_t b
)
779 return vqshruntq(a
, b
, 7);
780 #else /* POLYMORPHIC */
781 return vqshruntq_n_s32(a
, b
, 7);
782 #endif /* POLYMORPHIC */
785 // CHECK-LABEL: @test_vqshrunbq_m_n_s16(
786 // CHECK-NEXT: entry:
787 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
788 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
789 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
790 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
792 uint8x16_t
test_vqshrunbq_m_n_s16(uint8x16_t a
, int16x8_t b
, mve_pred16_t p
)
795 return vqshrunbq_m(a
, b
, 7, p
);
796 #else /* POLYMORPHIC */
797 return vqshrunbq_m_n_s16(a
, b
, 7, p
);
798 #endif /* POLYMORPHIC */
801 // CHECK-LABEL: @test_vqshrunbq_m_n_s32(
802 // CHECK-NEXT: entry:
803 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
804 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
805 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
806 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
808 uint16x8_t
test_vqshrunbq_m_n_s32(uint16x8_t a
, int32x4_t b
, mve_pred16_t p
)
811 return vqshrunbq_m(a
, b
, 7, p
);
812 #else /* POLYMORPHIC */
813 return vqshrunbq_m_n_s32(a
, b
, 7, p
);
814 #endif /* POLYMORPHIC */
817 // CHECK-LABEL: @test_vqshruntq_m_n_s16(
818 // CHECK-NEXT: entry:
819 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
820 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
821 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
822 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
824 uint8x16_t
test_vqshruntq_m_n_s16(uint8x16_t a
, int16x8_t b
, mve_pred16_t p
)
827 return vqshruntq_m(a
, b
, 7, p
);
828 #else /* POLYMORPHIC */
829 return vqshruntq_m_n_s16(a
, b
, 7, p
);
830 #endif /* POLYMORPHIC */
833 // CHECK-LABEL: @test_vqshruntq_m_n_s32(
834 // CHECK-NEXT: entry:
835 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
836 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
837 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 7, i32 1, i32 0, i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
838 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
840 uint16x8_t
test_vqshruntq_m_n_s32(uint16x8_t a
, int32x4_t b
, mve_pred16_t p
)
843 return vqshruntq_m(a
, b
, 7, p
);
844 #else /* POLYMORPHIC */
845 return vqshruntq_m_n_s32(a
, b
, 7, p
);
846 #endif /* POLYMORPHIC */
849 // CHECK-LABEL: @test_vqrshrnbq_n_s16(
850 // CHECK-NEXT: entry:
851 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 5, i32 1, i32 1, i32 0, i32 0, i32 0)
852 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
854 int8x16_t
test_vqrshrnbq_n_s16(int8x16_t a
, int16x8_t b
)
857 return vqrshrnbq(a
, b
, 5);
858 #else /* POLYMORPHIC */
859 return vqrshrnbq_n_s16(a
, b
, 5);
860 #endif /* POLYMORPHIC */
863 // CHECK-LABEL: @test_vqrshrnbq_n_s32(
864 // CHECK-NEXT: entry:
865 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 1, i32 1, i32 0, i32 0, i32 0)
866 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
868 int16x8_t
test_vqrshrnbq_n_s32(int16x8_t a
, int32x4_t b
)
871 return vqrshrnbq(a
, b
, 13);
872 #else /* POLYMORPHIC */
873 return vqrshrnbq_n_s32(a
, b
, 13);
874 #endif /* POLYMORPHIC */
877 // CHECK-LABEL: @test_vqrshrnbq_n_u16(
878 // CHECK-NEXT: entry:
879 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 1, i32 1, i32 1, i32 0)
880 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
882 uint8x16_t
test_vqrshrnbq_n_u16(uint8x16_t a
, uint16x8_t b
)
885 return vqrshrnbq(a
, b
, 7);
886 #else /* POLYMORPHIC */
887 return vqrshrnbq_n_u16(a
, b
, 7);
888 #endif /* POLYMORPHIC */
891 // CHECK-LABEL: @test_vqrshrnbq_n_u32(
892 // CHECK-NEXT: entry:
893 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 8, i32 1, i32 1, i32 1, i32 1, i32 0)
894 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
896 uint16x8_t
test_vqrshrnbq_n_u32(uint16x8_t a
, uint32x4_t b
)
899 return vqrshrnbq(a
, b
, 8);
900 #else /* POLYMORPHIC */
901 return vqrshrnbq_n_u32(a
, b
, 8);
902 #endif /* POLYMORPHIC */
905 // CHECK-LABEL: @test_vqrshrntq_n_s16(
906 // CHECK-NEXT: entry:
907 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 1, i32 0, i32 0, i32 1)
908 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
910 int8x16_t
test_vqrshrntq_n_s16(int8x16_t a
, int16x8_t b
)
913 return vqrshrntq(a
, b
, 7);
914 #else /* POLYMORPHIC */
915 return vqrshrntq_n_s16(a
, b
, 7);
916 #endif /* POLYMORPHIC */
919 // CHECK-LABEL: @test_vqrshrntq_n_s32(
920 // CHECK-NEXT: entry:
921 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 2, i32 1, i32 1, i32 0, i32 0, i32 1)
922 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
924 int16x8_t
test_vqrshrntq_n_s32(int16x8_t a
, int32x4_t b
)
927 return vqrshrntq(a
, b
, 2);
928 #else /* POLYMORPHIC */
929 return vqrshrntq_n_s32(a
, b
, 2);
930 #endif /* POLYMORPHIC */
933 // CHECK-LABEL: @test_vqrshrntq_n_u16(
934 // CHECK-NEXT: entry:
935 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, i32 1, i32 1, i32 1)
936 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
938 uint8x16_t
test_vqrshrntq_n_u16(uint8x16_t a
, uint16x8_t b
)
941 return vqrshrntq(a
, b
, 1);
942 #else /* POLYMORPHIC */
943 return vqrshrntq_n_u16(a
, b
, 1);
944 #endif /* POLYMORPHIC */
947 // CHECK-LABEL: @test_vqrshrntq_n_u32(
948 // CHECK-NEXT: entry:
949 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 11, i32 1, i32 1, i32 1, i32 1, i32 1)
950 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
952 uint16x8_t
test_vqrshrntq_n_u32(uint16x8_t a
, uint32x4_t b
)
955 return vqrshrntq(a
, b
, 11);
956 #else /* POLYMORPHIC */
957 return vqrshrntq_n_u32(a
, b
, 11);
958 #endif /* POLYMORPHIC */
961 // CHECK-LABEL: @test_vqrshrnbq_m_n_s16(
962 // CHECK-NEXT: entry:
963 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
964 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
965 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 2, i32 1, i32 1, i32 0, i32 0, i32 0, <8 x i1> [[TMP1]])
966 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
968 int8x16_t
test_vqrshrnbq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
971 return vqrshrnbq_m(a
, b
, 2, p
);
972 #else /* POLYMORPHIC */
973 return vqrshrnbq_m_n_s16(a
, b
, 2, p
);
974 #endif /* POLYMORPHIC */
977 // CHECK-LABEL: @test_vqrshrnbq_m_n_s32(
978 // CHECK-NEXT: entry:
979 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
980 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
981 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 12, i32 1, i32 1, i32 0, i32 0, i32 0, <4 x i1> [[TMP1]])
982 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
984 int16x8_t
test_vqrshrnbq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
987 return vqrshrnbq_m(a
, b
, 12, p
);
988 #else /* POLYMORPHIC */
989 return vqrshrnbq_m_n_s32(a
, b
, 12, p
);
990 #endif /* POLYMORPHIC */
993 // CHECK-LABEL: @test_vqrshrnbq_m_n_u16(
994 // CHECK-NEXT: entry:
995 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
996 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
997 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 5, i32 1, i32 1, i32 1, i32 1, i32 0, <8 x i1> [[TMP1]])
998 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1000 uint8x16_t
test_vqrshrnbq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
1003 return vqrshrnbq_m(a
, b
, 5, p
);
1004 #else /* POLYMORPHIC */
1005 return vqrshrnbq_m_n_u16(a
, b
, 5, p
);
1006 #endif /* POLYMORPHIC */
1009 // CHECK-LABEL: @test_vqrshrnbq_m_n_u32(
1010 // CHECK-NEXT: entry:
1011 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1012 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1013 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 11, i32 1, i32 1, i32 1, i32 1, i32 0, <4 x i1> [[TMP1]])
1014 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1016 uint16x8_t
test_vqrshrnbq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
1019 return vqrshrnbq_m(a
, b
, 11, p
);
1020 #else /* POLYMORPHIC */
1021 return vqrshrnbq_m_n_u32(a
, b
, 11, p
);
1022 #endif /* POLYMORPHIC */
1025 // CHECK-LABEL: @test_vqrshrntq_m_n_s16(
1026 // CHECK-NEXT: entry:
1027 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1028 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1029 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 1, i32 1, i32 0, i32 0, i32 1, <8 x i1> [[TMP1]])
1030 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1032 int8x16_t
test_vqrshrntq_m_n_s16(int8x16_t a
, int16x8_t b
, mve_pred16_t p
)
1035 return vqrshrntq_m(a
, b
, 4, p
);
1036 #else /* POLYMORPHIC */
1037 return vqrshrntq_m_n_s16(a
, b
, 4, p
);
1038 #endif /* POLYMORPHIC */
1041 // CHECK-LABEL: @test_vqrshrntq_m_n_s32(
1042 // CHECK-NEXT: entry:
1043 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1044 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1045 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 6, i32 1, i32 1, i32 0, i32 0, i32 1, <4 x i1> [[TMP1]])
1046 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1048 int16x8_t
test_vqrshrntq_m_n_s32(int16x8_t a
, int32x4_t b
, mve_pred16_t p
)
1051 return vqrshrntq_m(a
, b
, 6, p
);
1052 #else /* POLYMORPHIC */
1053 return vqrshrntq_m_n_s32(a
, b
, 6, p
);
1054 #endif /* POLYMORPHIC */
1057 // CHECK-LABEL: @test_vqrshrntq_m_n_u16(
1058 // CHECK-NEXT: entry:
1059 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1060 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1061 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 1, i32 1, i32 1, i32 1, <8 x i1> [[TMP1]])
1062 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1064 uint8x16_t
test_vqrshrntq_m_n_u16(uint8x16_t a
, uint16x8_t b
, mve_pred16_t p
)
1067 return vqrshrntq_m(a
, b
, 7, p
);
1068 #else /* POLYMORPHIC */
1069 return vqrshrntq_m_n_u16(a
, b
, 7, p
);
1070 #endif /* POLYMORPHIC */
1073 // CHECK-LABEL: @test_vqrshrntq_m_n_u32(
1074 // CHECK-NEXT: entry:
1075 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1076 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1077 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 15, i32 1, i32 1, i32 1, i32 1, i32 1, <4 x i1> [[TMP1]])
1078 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1080 uint16x8_t
test_vqrshrntq_m_n_u32(uint16x8_t a
, uint32x4_t b
, mve_pred16_t p
)
1083 return vqrshrntq_m(a
, b
, 15, p
);
1084 #else /* POLYMORPHIC */
1085 return vqrshrntq_m_n_u32(a
, b
, 15, p
);
1086 #endif /* POLYMORPHIC */
1089 // CHECK-LABEL: @test_vqrshrunbq_n_s16(
1090 // CHECK-NEXT: entry:
1091 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 7, i32 1, i32 1, i32 1, i32 0, i32 0)
1092 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1094 uint8x16_t
test_vqrshrunbq_n_s16(uint8x16_t a
, int16x8_t b
)
1097 return vqrshrunbq(a
, b
, 7);
1098 #else /* POLYMORPHIC */
1099 return vqrshrunbq_n_s16(a
, b
, 7);
1100 #endif /* POLYMORPHIC */
1103 // CHECK-LABEL: @test_vqrshrunbq_n_s32(
1104 // CHECK-NEXT: entry:
1105 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, i32 1, i32 1, i32 1, i32 0, i32 0)
1106 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1108 uint16x8_t
test_vqrshrunbq_n_s32(uint16x8_t a
, int32x4_t b
)
1111 return vqrshrunbq(a
, b
, 1);
1112 #else /* POLYMORPHIC */
1113 return vqrshrunbq_n_s32(a
, b
, 1);
1114 #endif /* POLYMORPHIC */
1117 // CHECK-LABEL: @test_vqrshruntq_n_s16(
1118 // CHECK-NEXT: entry:
1119 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.v16i8.v8i16(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, i32 1, i32 1, i32 1, i32 0, i32 1)
1120 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1122 uint8x16_t
test_vqrshruntq_n_s16(uint8x16_t a
, int16x8_t b
)
1125 return vqrshruntq(a
, b
, 1);
1126 #else /* POLYMORPHIC */
1127 return vqrshruntq_n_s16(a
, b
, 1);
1128 #endif /* POLYMORPHIC */
1131 // CHECK-LABEL: @test_vqrshruntq_n_s32(
1132 // CHECK-NEXT: entry:
1133 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.v8i16.v4i32(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 3, i32 1, i32 1, i32 1, i32 0, i32 1)
1134 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1136 uint16x8_t
test_vqrshruntq_n_s32(uint16x8_t a
, int32x4_t b
)
1139 return vqrshruntq(a
, b
, 3);
1140 #else /* POLYMORPHIC */
1141 return vqrshruntq_n_s32(a
, b
, 3);
1142 #endif /* POLYMORPHIC */
1145 // CHECK-LABEL: @test_vqrshrunbq_m_n_s16(
1146 // CHECK-NEXT: entry:
1147 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1148 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1149 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 4, i32 1, i32 1, i32 1, i32 0, i32 0, <8 x i1> [[TMP1]])
1150 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1152 uint8x16_t
test_vqrshrunbq_m_n_s16(uint8x16_t a
, int16x8_t b
, mve_pred16_t p
)
1155 return vqrshrunbq_m(a
, b
, 4, p
);
1156 #else /* POLYMORPHIC */
1157 return vqrshrunbq_m_n_s16(a
, b
, 4, p
);
1158 #endif /* POLYMORPHIC */
1161 // CHECK-LABEL: @test_vqrshrunbq_m_n_s32(
1162 // CHECK-NEXT: entry:
1163 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1164 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1165 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 10, i32 1, i32 1, i32 1, i32 0, i32 0, <4 x i1> [[TMP1]])
1166 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1168 uint16x8_t
test_vqrshrunbq_m_n_s32(uint16x8_t a
, int32x4_t b
, mve_pred16_t p
)
1171 return vqrshrunbq_m(a
, b
, 10, p
);
1172 #else /* POLYMORPHIC */
1173 return vqrshrunbq_m_n_s32(a
, b
, 10, p
);
1174 #endif /* POLYMORPHIC */
1177 // CHECK-LABEL: @test_vqrshruntq_m_n_s16(
1178 // CHECK-NEXT: entry:
1179 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1180 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1181 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vshrn.predicated.v16i8.v8i16.v8i1(<16 x i8> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, i32 1, i32 1, i32 1, i32 0, i32 1, <8 x i1> [[TMP1]])
1182 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1184 uint8x16_t
test_vqrshruntq_m_n_s16(uint8x16_t a
, int16x8_t b
, mve_pred16_t p
)
1187 return vqrshruntq_m(a
, b
, 3, p
);
1188 #else /* POLYMORPHIC */
1189 return vqrshruntq_m_n_s16(a
, b
, 3, p
);
1190 #endif /* POLYMORPHIC */
1193 // CHECK-LABEL: @test_vqrshruntq_m_n_s32(
1194 // CHECK-NEXT: entry:
1195 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1196 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1197 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vshrn.predicated.v8i16.v4i32.v4i1(<8 x i16> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, i32 1, i32 1, i32 1, i32 0, i32 1, <4 x i1> [[TMP1]])
1198 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1200 uint16x8_t
test_vqrshruntq_m_n_s32(uint16x8_t a
, int32x4_t b
, mve_pred16_t p
)
1203 return vqrshruntq_m(a
, b
, 13, p
);
1204 #else /* POLYMORPHIC */
1205 return vqrshruntq_m_n_s32(a
, b
, 13, p
);
1206 #endif /* POLYMORPHIC */
1209 // CHECK-LABEL: @test_vsliq_n_s8(
1210 // CHECK-NEXT: entry:
1211 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vsli.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 2)
1212 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1214 int8x16_t
test_vsliq_n_s8(int8x16_t a
, int8x16_t b
)
1217 return vsliq(a
, b
, 2);
1218 #else /* POLYMORPHIC */
1219 return vsliq_n_s8(a
, b
, 2);
1220 #endif /* POLYMORPHIC */
1223 // CHECK-LABEL: @test_vsliq_n_s16(
1224 // CHECK-NEXT: entry:
1225 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vsli.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 10)
1226 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1228 int16x8_t
test_vsliq_n_s16(int16x8_t a
, int16x8_t b
)
1231 return vsliq(a
, b
, 10);
1232 #else /* POLYMORPHIC */
1233 return vsliq_n_s16(a
, b
, 10);
1234 #endif /* POLYMORPHIC */
1237 // CHECK-LABEL: @test_vsliq_n_s32(
1238 // CHECK-NEXT: entry:
1239 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vsli.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1)
1240 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
1242 int32x4_t
test_vsliq_n_s32(int32x4_t a
, int32x4_t b
)
1245 return vsliq(a
, b
, 1);
1246 #else /* POLYMORPHIC */
1247 return vsliq_n_s32(a
, b
, 1);
1248 #endif /* POLYMORPHIC */
1251 // CHECK-LABEL: @test_vsliq_n_u8(
1252 // CHECK-NEXT: entry:
1253 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vsli.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 1)
1254 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1256 uint8x16_t
test_vsliq_n_u8(uint8x16_t a
, uint8x16_t b
)
1259 return vsliq(a
, b
, 1);
1260 #else /* POLYMORPHIC */
1261 return vsliq_n_u8(a
, b
, 1);
1262 #endif /* POLYMORPHIC */
1265 // CHECK-LABEL: @test_vsliq_n_u16(
1266 // CHECK-NEXT: entry:
1267 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vsli.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1)
1268 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1270 uint16x8_t
test_vsliq_n_u16(uint16x8_t a
, uint16x8_t b
)
1273 return vsliq(a
, b
, 1);
1274 #else /* POLYMORPHIC */
1275 return vsliq_n_u16(a
, b
, 1);
1276 #endif /* POLYMORPHIC */
1279 // CHECK-LABEL: @test_vsliq_n_u32(
1280 // CHECK-NEXT: entry:
1281 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vsli.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 28)
1282 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
1284 uint32x4_t
test_vsliq_n_u32(uint32x4_t a
, uint32x4_t b
)
1287 return vsliq(a
, b
, 28);
1288 #else /* POLYMORPHIC */
1289 return vsliq_n_u32(a
, b
, 28);
1290 #endif /* POLYMORPHIC */
1293 // CHECK-LABEL: @test_vsliq_m_n_s8(
1294 // CHECK-NEXT: entry:
1295 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1296 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1297 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vsli.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 4, <16 x i1> [[TMP1]])
1298 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1300 int8x16_t
test_vsliq_m_n_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1303 return vsliq_m(a
, b
, 4, p
);
1304 #else /* POLYMORPHIC */
1305 return vsliq_m_n_s8(a
, b
, 4, p
);
1306 #endif /* POLYMORPHIC */
1309 // CHECK-LABEL: @test_vsliq_m_n_s16(
1310 // CHECK-NEXT: entry:
1311 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1312 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1313 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vsli.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, <8 x i1> [[TMP1]])
1314 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1316 int16x8_t
test_vsliq_m_n_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1319 return vsliq_m(a
, b
, 1, p
);
1320 #else /* POLYMORPHIC */
1321 return vsliq_m_n_s16(a
, b
, 1, p
);
1322 #endif /* POLYMORPHIC */
1325 // CHECK-LABEL: @test_vsliq_m_n_s32(
1326 // CHECK-NEXT: entry:
1327 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1328 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1329 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vsli.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, <4 x i1> [[TMP1]])
1330 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1332 int32x4_t
test_vsliq_m_n_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1335 return vsliq_m(a
, b
, 1, p
);
1336 #else /* POLYMORPHIC */
1337 return vsliq_m_n_s32(a
, b
, 1, p
);
1338 #endif /* POLYMORPHIC */
1341 // CHECK-LABEL: @test_vsliq_m_n_u8(
1342 // CHECK-NEXT: entry:
1343 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1344 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1345 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vsli.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 5, <16 x i1> [[TMP1]])
1346 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1348 uint8x16_t
test_vsliq_m_n_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
1351 return vsliq_m(a
, b
, 5, p
);
1352 #else /* POLYMORPHIC */
1353 return vsliq_m_n_u8(a
, b
, 5, p
);
1354 #endif /* POLYMORPHIC */
1357 // CHECK-LABEL: @test_vsliq_m_n_u16(
1358 // CHECK-NEXT: entry:
1359 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1360 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1361 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vsli.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3, <8 x i1> [[TMP1]])
1362 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1364 uint16x8_t
test_vsliq_m_n_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
1367 return vsliq_m(a
, b
, 3, p
);
1368 #else /* POLYMORPHIC */
1369 return vsliq_m_n_u16(a
, b
, 3, p
);
1370 #endif /* POLYMORPHIC */
1373 // CHECK-LABEL: @test_vsliq_m_n_u32(
1374 // CHECK-NEXT: entry:
1375 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1376 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1377 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vsli.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 9, <4 x i1> [[TMP1]])
1378 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1380 uint32x4_t
test_vsliq_m_n_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
1383 return vsliq_m(a
, b
, 9, p
);
1384 #else /* POLYMORPHIC */
1385 return vsliq_m_n_u32(a
, b
, 9, p
);
1386 #endif /* POLYMORPHIC */
1389 // CHECK-LABEL: @test_vsriq_n_s8(
1390 // CHECK-NEXT: entry:
1391 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vsri.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 3)
1392 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1394 int8x16_t
test_vsriq_n_s8(int8x16_t a
, int8x16_t b
)
1397 return vsriq(a
, b
, 3);
1398 #else /* POLYMORPHIC */
1399 return vsriq_n_s8(a
, b
, 3);
1400 #endif /* POLYMORPHIC */
1403 // CHECK-LABEL: @test_vsriq_n_s16(
1404 // CHECK-NEXT: entry:
1405 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vsri.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 2)
1406 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1408 int16x8_t
test_vsriq_n_s16(int16x8_t a
, int16x8_t b
)
1411 return vsriq(a
, b
, 2);
1412 #else /* POLYMORPHIC */
1413 return vsriq_n_s16(a
, b
, 2);
1414 #endif /* POLYMORPHIC */
1417 // CHECK-LABEL: @test_vsriq_n_s32(
1418 // CHECK-NEXT: entry:
1419 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vsri.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 28)
1420 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
1422 int32x4_t
test_vsriq_n_s32(int32x4_t a
, int32x4_t b
)
1425 return vsriq(a
, b
, 28);
1426 #else /* POLYMORPHIC */
1427 return vsriq_n_s32(a
, b
, 28);
1428 #endif /* POLYMORPHIC */
1431 // CHECK-LABEL: @test_vsriq_n_u8(
1432 // CHECK-NEXT: entry:
1433 // CHECK-NEXT: [[TMP0:%.*]] = call <16 x i8> @llvm.arm.mve.vsri.v16i8(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 3)
1434 // CHECK-NEXT: ret <16 x i8> [[TMP0]]
1436 uint8x16_t
test_vsriq_n_u8(uint8x16_t a
, uint8x16_t b
)
1439 return vsriq(a
, b
, 3);
1440 #else /* POLYMORPHIC */
1441 return vsriq_n_u8(a
, b
, 3);
1442 #endif /* POLYMORPHIC */
1445 // CHECK-LABEL: @test_vsriq_n_u16(
1446 // CHECK-NEXT: entry:
1447 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x i16> @llvm.arm.mve.vsri.v8i16(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 3)
1448 // CHECK-NEXT: ret <8 x i16> [[TMP0]]
1450 uint16x8_t
test_vsriq_n_u16(uint16x8_t a
, uint16x8_t b
)
1453 return vsriq(a
, b
, 3);
1454 #else /* POLYMORPHIC */
1455 return vsriq_n_u16(a
, b
, 3);
1456 #endif /* POLYMORPHIC */
1459 // CHECK-LABEL: @test_vsriq_n_u32(
1460 // CHECK-NEXT: entry:
1461 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.arm.mve.vsri.v4i32(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 26)
1462 // CHECK-NEXT: ret <4 x i32> [[TMP0]]
1464 uint32x4_t
test_vsriq_n_u32(uint32x4_t a
, uint32x4_t b
)
1467 return vsriq(a
, b
, 26);
1468 #else /* POLYMORPHIC */
1469 return vsriq_n_u32(a
, b
, 26);
1470 #endif /* POLYMORPHIC */
1473 // CHECK-LABEL: @test_vsriq_m_n_s8(
1474 // CHECK-NEXT: entry:
1475 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1476 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1477 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vsri.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 4, <16 x i1> [[TMP1]])
1478 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1480 int8x16_t
test_vsriq_m_n_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1483 return vsriq_m(a
, b
, 4, p
);
1484 #else /* POLYMORPHIC */
1485 return vsriq_m_n_s8(a
, b
, 4, p
);
1486 #endif /* POLYMORPHIC */
1489 // CHECK-LABEL: @test_vsriq_m_n_s16(
1490 // CHECK-NEXT: entry:
1491 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1492 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1493 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vsri.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 1, <8 x i1> [[TMP1]])
1494 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1496 int16x8_t
test_vsriq_m_n_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1499 return vsriq_m(a
, b
, 1, p
);
1500 #else /* POLYMORPHIC */
1501 return vsriq_m_n_s16(a
, b
, 1, p
);
1502 #endif /* POLYMORPHIC */
1505 // CHECK-LABEL: @test_vsriq_m_n_s32(
1506 // CHECK-NEXT: entry:
1507 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1508 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1509 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vsri.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 27, <4 x i1> [[TMP1]])
1510 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1512 int32x4_t
test_vsriq_m_n_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1515 return vsriq_m(a
, b
, 27, p
);
1516 #else /* POLYMORPHIC */
1517 return vsriq_m_n_s32(a
, b
, 27, p
);
1518 #endif /* POLYMORPHIC */
1521 // CHECK-LABEL: @test_vsriq_m_n_u8(
1522 // CHECK-NEXT: entry:
1523 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1524 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1525 // CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.vsri.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], i32 7, <16 x i1> [[TMP1]])
1526 // CHECK-NEXT: ret <16 x i8> [[TMP2]]
1528 uint8x16_t
test_vsriq_m_n_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
1531 return vsriq_m(a
, b
, 7, p
);
1532 #else /* POLYMORPHIC */
1533 return vsriq_m_n_u8(a
, b
, 7, p
);
1534 #endif /* POLYMORPHIC */
1537 // CHECK-LABEL: @test_vsriq_m_n_u16(
1538 // CHECK-NEXT: entry:
1539 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1540 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1541 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.vsri.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i32 9, <8 x i1> [[TMP1]])
1542 // CHECK-NEXT: ret <8 x i16> [[TMP2]]
1544 uint16x8_t
test_vsriq_m_n_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
1547 return vsriq_m(a
, b
, 9, p
);
1548 #else /* POLYMORPHIC */
1549 return vsriq_m_n_u16(a
, b
, 9, p
);
1550 #endif /* POLYMORPHIC */
1553 // CHECK-LABEL: @test_vsriq_m_n_u32(
1554 // CHECK-NEXT: entry:
1555 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1556 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1557 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.vsri.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], i32 13, <4 x i1> [[TMP1]])
1558 // CHECK-NEXT: ret <4 x i32> [[TMP2]]
1560 uint32x4_t
test_vsriq_m_n_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
1563 return vsriq_m(a
, b
, 13, p
);
1564 #else /* POLYMORPHIC */
1565 return vsriq_m_n_u32(a
, b
, 13, p
);
1566 #endif /* POLYMORPHIC */