// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
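
// The tests below check that each MVE across-vector min/max reduction
// intrinsic (and, when built with -DPOLYMORPHIC, its overloaded form) lowers
// to the expected llvm.arm.mve.* intrinsic call.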

// CHECK-LABEL: @test_vminvq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minv.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
int8_t test_vminvq_s8(int8_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minv.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
int16_t test_vminvq_s16(int16_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.minv.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vminvq_s32(int32_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minv.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
uint8_t test_vminvq_u8(uint8_t a, uint8x16_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_u8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minv.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
uint16_t test_vminvq_u16(uint16_t a, uint16x8_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_u16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.minv.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vminvq_u32(uint32_t a, uint32x4_t b) {
#ifdef POLYMORPHIC
  return vminvq(a, b);
#else /* POLYMORPHIC */
  return vminvq_u32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxv.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
int8_t test_vmaxvq_s8(int8_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxv.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 0)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
int16_t test_vmaxvq_s16(int16_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.maxv.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 0)
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t test_vmaxvq_s32(int32_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxv.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
uint8_t test_vmaxvq_u8(uint8_t a, uint8x16_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_u8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxv.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
uint16_t test_vmaxvq_u16(uint16_t a, uint16x8_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_u16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.maxv.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 1)
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmaxvq_u32(uint32_t a, uint32x4_t b) {
#ifdef POLYMORPHIC
  return vmaxvq(a, b);
#else /* POLYMORPHIC */
  return vmaxvq_u32(a, b);
#endif /* POLYMORPHIC */
}
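
// vminavq/vmaxavq reduce the absolute values of a signed vector against an
// unsigned scalar, so they lower to llvm.arm.mve.minav/maxav without the
// signedness flag used by minv/maxv above.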

// CHECK-LABEL: @test_vminavq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minav.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
uint8_t test_vminavq_s8(uint8_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vminavq(a, b);
#else /* POLYMORPHIC */
  return vminavq_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.minav.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
uint16_t test_vminavq_s16(uint16_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vminavq(a, b);
#else /* POLYMORPHIC */
  return vminavq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.minav.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vminavq_s32(uint32_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vminavq(a, b);
#else /* POLYMORPHIC */
  return vminavq_s32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxav.v16i8(i32 [[TMP0]], <16 x i8> [[B:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i8
// CHECK-NEXT: ret i8 [[TMP2]]
//
uint8_t test_vmaxavq_s8(uint8_t a, int8x16_t b) {
#ifdef POLYMORPHIC
  return vmaxavq(a, b);
#else /* POLYMORPHIC */
  return vmaxavq_s8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.maxav.v8i16(i32 [[TMP0]], <8 x i16> [[B:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
// CHECK-NEXT: ret i16 [[TMP2]]
//
uint16_t test_vmaxavq_s16(uint16_t a, int16x8_t b) {
#ifdef POLYMORPHIC
  return vmaxavq(a, b);
#else /* POLYMORPHIC */
  return vmaxavq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.maxav.v4i32(i32 [[A:%.*]], <4 x i32> [[B:%.*]])
// CHECK-NEXT: ret i32 [[TMP0]]
//
uint32_t test_vmaxavq_s32(uint32_t a, int32x4_t b) {
#ifdef POLYMORPHIC
  return vmaxavq(a, b);
#else /* POLYMORPHIC */
  return vmaxavq_s32(a, b);
#endif /* POLYMORPHIC */
}
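
// The floating-point reductions (vminnmvq, vmaxnmvq and their absolute-value
// variants vminnmavq, vmaxnmavq) keep the element's scalar type rather than
// widening to i32, and lower to llvm.arm.mve.minnmv/maxnmv/minnmav/maxnmav.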

// CHECK-LABEL: @test_vminnmvq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call half @llvm.arm.mve.minnmv.f16.v8f16(half [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT: ret half [[TMP0]]
//
float16_t test_vminnmvq_f16(float16_t a, float16x8_t b) {
#ifdef POLYMORPHIC
  return vminnmvq(a, b);
#else /* POLYMORPHIC */
  return vminnmvq_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmvq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call float @llvm.arm.mve.minnmv.f32.v4f32(float [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT: ret float [[TMP0]]
//
float32_t test_vminnmvq_f32(float32_t a, float32x4_t b) {
#ifdef POLYMORPHIC
  return vminnmvq(a, b);
#else /* POLYMORPHIC */
  return vminnmvq_f32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmavq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call half @llvm.arm.mve.minnmav.f16.v8f16(half [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT: ret half [[TMP0]]
//
float16_t test_vminnmavq_f16(float16_t a, float16x8_t b) {
#ifdef POLYMORPHIC
  return vminnmavq(a, b);
#else /* POLYMORPHIC */
  return vminnmavq_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmavq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call float @llvm.arm.mve.minnmav.f32.v4f32(float [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT: ret float [[TMP0]]
//
float32_t test_vminnmavq_f32(float32_t a, float32x4_t b) {
#ifdef POLYMORPHIC
  return vminnmavq(a, b);
#else /* POLYMORPHIC */
  return vminnmavq_f32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmvq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call half @llvm.arm.mve.maxnmv.f16.v8f16(half [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT: ret half [[TMP0]]
//
float16_t test_vmaxnmvq_f16(float16_t a, float16x8_t b) {
#ifdef POLYMORPHIC
  return vmaxnmvq(a, b);
#else /* POLYMORPHIC */
  return vmaxnmvq_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmvq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call float @llvm.arm.mve.maxnmv.f32.v4f32(float [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT: ret float [[TMP0]]
//
float32_t test_vmaxnmvq_f32(float32_t a, float32x4_t b) {
#ifdef POLYMORPHIC
  return vmaxnmvq(a, b);
#else /* POLYMORPHIC */
  return vmaxnmvq_f32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmavq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call half @llvm.arm.mve.maxnmav.f16.v8f16(half [[A:%.*]], <8 x half> [[B:%.*]])
// CHECK-NEXT: ret half [[TMP0]]
//
float16_t test_vmaxnmavq_f16(float16_t a, float16x8_t b) {
#ifdef POLYMORPHIC
  return vmaxnmavq(a, b);
#else /* POLYMORPHIC */
  return vmaxnmavq_f16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmavq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call float @llvm.arm.mve.maxnmav.f32.v4f32(float [[A:%.*]], <4 x float> [[B:%.*]])
// CHECK-NEXT: ret float [[TMP0]]
//
float32_t test_vmaxnmavq_f32(float32_t a, float32x4_t b) {
#ifdef POLYMORPHIC
  return vmaxnmavq(a, b);
#else /* POLYMORPHIC */
  return vmaxnmavq_f32(a, b);
#endif /* POLYMORPHIC */
}
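
// The _p (predicated) variants take an mve_pred16_t mask, which is expanded
// to a vector of i1 via llvm.arm.mve.pred.i2v and passed to the corresponding
// *.predicated intrinsic.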

// CHECK-LABEL: @test_vminvq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 0, <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
int8_t test_vminvq_p_s8(int8_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 0, <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
int16_t test_vminvq_p_s16(int16_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vminvq_p_s32(int32_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_p_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 1, <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
uint8_t test_vminvq_p_u8(uint8_t a, uint8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_u8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 1, <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
uint16_t test_vminvq_p_u16(uint16_t a, uint16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_u16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminvq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.minv.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vminvq_p_u32(uint32_t a, uint32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminvq_p_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 0, <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
int8_t test_vmaxvq_p_s8(int8_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 0, <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
int16_t test_vmaxvq_p_s16(int16_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 0, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t test_vmaxvq_p_s32(int32_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], i32 1, <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
uint8_t test_vmaxvq_p_u8(uint8_t a, uint8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_u8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], i32 1, <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
uint16_t test_vmaxvq_p_u16(uint16_t a, uint16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_u16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxvq_p_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.maxv.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], i32 1, <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmaxvq_p_u32(uint32_t a, uint32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxvq_p_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminavq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minav.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
uint8_t test_vminavq_p_s8(uint8_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminavq_p_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.minav.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
uint16_t test_vminavq_p_s16(uint16_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminavq_p_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.minav.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vminavq_p_s32(uint32_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminavq_p_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_p_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i8 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxav.predicated.v16i8.v16i1(i32 [[TMP0]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
// CHECK-NEXT: ret i8 [[TMP4]]
//
uint8_t test_vmaxavq_p_s8(uint8_t a, int8x16_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxavq_p_s8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_p_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[A:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.arm.mve.maxav.predicated.v8i16.v8i1(i32 [[TMP0]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
uint16_t test_vmaxavq_p_s16(uint16_t a, int16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxavq_p_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxavq_p_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.maxav.predicated.v4i32.v4i1(i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret i32 [[TMP2]]
//
uint32_t test_vmaxavq_p_s32(uint32_t a, int32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxavq_p_s32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmvq_p_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call half @llvm.arm.mve.minnmv.predicated.f16.v8f16.v8i1(half [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret half [[TMP2]]
//
float16_t test_vminnmvq_p_f16(float16_t a, float16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminnmvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminnmvq_p_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmvq_p_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.mve.minnmv.predicated.f32.v4f32.v4i1(float [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret float [[TMP2]]
//
float32_t test_vminnmvq_p_f32(float32_t a, float32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminnmvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminnmvq_p_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmavq_p_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call half @llvm.arm.mve.minnmav.predicated.f16.v8f16.v8i1(half [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret half [[TMP2]]
//
float16_t test_vminnmavq_p_f16(float16_t a, float16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminnmavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminnmavq_p_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vminnmavq_p_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.mve.minnmav.predicated.f32.v4f32.v4i1(float [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret float [[TMP2]]
//
float32_t test_vminnmavq_p_f32(float32_t a, float32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vminnmavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vminnmavq_p_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmvq_p_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call half @llvm.arm.mve.maxnmv.predicated.f16.v8f16.v8i1(half [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret half [[TMP2]]
//
float16_t test_vmaxnmvq_p_f16(float16_t a, float16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxnmvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxnmvq_p_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmvq_p_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.mve.maxnmv.predicated.f32.v4f32.v4i1(float [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret float [[TMP2]]
//
float32_t test_vmaxnmvq_p_f32(float32_t a, float32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxnmvq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxnmvq_p_f32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmavq_p_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call half @llvm.arm.mve.maxnmav.predicated.f16.v8f16.v8i1(half [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
// CHECK-NEXT: ret half [[TMP2]]
//
float16_t test_vmaxnmavq_p_f16(float16_t a, float16x8_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxnmavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxnmavq_p_f16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmaxnmavq_p_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call float @llvm.arm.mve.maxnmav.predicated.f32.v4f32.v4i1(float [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
// CHECK-NEXT: ret float [[TMP2]]
//
float32_t test_vmaxnmavq_p_f32(float32_t a, float32x4_t b, mve_pred16_t p) {
#ifdef POLYMORPHIC
  return vmaxnmavq_p(a, b, p);
#else /* POLYMORPHIC */
  return vmaxnmavq_p_f32(a, b, p);
#endif /* POLYMORPHIC */
}