// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
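
// Each intrinsic below is tested twice: with -DPOLYMORPHIC the #ifdef
// branches call the overloaded spelling (e.g. vabsq(a)); without it they
// call the explicitly suffixed spelling (e.g. vabsq_f16(a)). Both RUN
// lines must match the same CHECK assertions.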
// CHECK-LABEL: @test_vabsq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.fabs.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vabsq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vabsq(a);
#else /* POLYMORPHIC */
    return vabsq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.fabs.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float32x4_t test_vabsq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vabsq(a);
#else /* POLYMORPHIC */
    return vabsq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[TMP1]], <16 x i8> [[A]]
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vabsq_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vabsq(a);
#else /* POLYMORPHIC */
    return vabsq_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP1]], <8 x i16> [[A]]
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vabsq_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vabsq(a);
#else /* POLYMORPHIC */
    return vabsq_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[TMP1]], <4 x i32> [[A]]
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vabsq_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vabsq(a);
#else /* POLYMORPHIC */
    return vabsq_s32(a);
#endif /* POLYMORPHIC */
}

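// vmvnq computes the bitwise complement; in the unpredicated case this is
// a plain xor with an all-ones splat, with no intrinsic needed.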
// CHECK-LABEL: @test_vmvnq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], splat (i8 -1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vmvnq_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], splat (i16 -1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vmvnq_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vmvnq_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_s32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <16 x i8> [[A:%.*]], splat (i8 -1)
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
uint8x16_t test_vmvnq_u8(uint8x16_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_u8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <8 x i16> [[A:%.*]], splat (i16 -1)
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
uint16x8_t test_vmvnq_u16(uint16x8_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_u16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i32> [[A:%.*]], splat (i32 -1)
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
uint32x4_t test_vmvnq_u32(uint32x4_t a)
{
#ifdef POLYMORPHIC
    return vmvnq(a);
#else /* POLYMORPHIC */
    return vmvnq_u32(a);
#endif /* POLYMORPHIC */
}

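// The _m (merging) predicated forms take an extra first argument, inactive,
// which supplies the lanes where the predicate p is false. The i16 predicate
// is widened to i32 and converted to a vector of i1 lanes via
// @llvm.arm.mve.pred.i2v before the predicated intrinsic is called.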
// CHECK-LABEL: @test_vmvnq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vmvnq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vmvnq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vmvnq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_m_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vmvnq_m_u8(uint8x16_t inactive, uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_u8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_m_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vmvnq_m_u16(uint16x8_t inactive, uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_u16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_m_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vmvnq_m_u32(uint32x4_t inactive, uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vmvnq_m_u32(inactive, a, p);
#endif /* POLYMORPHIC */
}

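// The _x (don't-care) predicated forms take no inactive argument; the same
// predicated intrinsics are called with undef as the inactive operand, so
// false-predicated lanes have unspecified values.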
// CHECK-LABEL: @test_vmvnq_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vmvnq_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vmvnq_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_s16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_x_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vmvnq_x_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_s32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_x_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.mvn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vmvnq_x_u8(uint8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_u8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_x_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.mvn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vmvnq_x_u16(uint16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_u16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vmvnq_x_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.mvn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vmvnq_x_u32(uint32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vmvnq_x(a, p);
#else /* POLYMORPHIC */
    return vmvnq_x_u32(a, p);
#endif /* POLYMORPHIC */
}

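// vnegq lowers to fneg for the float types and to a subtraction from zero
// for the (wrapping) integer types.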
// CHECK-LABEL: @test_vnegq_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fneg <8 x half> [[A:%.*]]
// CHECK-NEXT: ret <8 x half> [[TMP0]]
//
float16x8_t test_vnegq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vnegq(a);
#else /* POLYMORPHIC */
    return vnegq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = fneg <4 x float> [[A:%.*]]
// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float32x4_t test_vnegq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vnegq(a);
#else /* POLYMORPHIC */
    return vnegq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = sub <16 x i8> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <16 x i8> [[TMP0]]
//
int8x16_t test_vnegq_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vnegq(a);
#else /* POLYMORPHIC */
    return vnegq_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = sub <8 x i16> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <8 x i16> [[TMP0]]
//
int16x8_t test_vnegq_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vnegq(a);
#else /* POLYMORPHIC */
    return vnegq_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = sub <4 x i32> zeroinitializer, [[A:%.*]]
// CHECK-NEXT: ret <4 x i32> [[TMP0]]
//
int32x4_t test_vnegq_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vnegq(a);
#else /* POLYMORPHIC */
    return vnegq_s32(a);
#endif /* POLYMORPHIC */
}

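// vqabsq and vqnegq are the saturating variants: the most negative value
// (e.g. i8 -128) cannot be negated in two's complement, so it saturates to
// the maximum (e.g. i8 127) instead of wrapping.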
// CHECK-LABEL: @test_vqabsq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = icmp eq <16 x i8> [[A]], splat (i8 -128)
// CHECK-NEXT: [[TMP2:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP3:%.*]] = select <16 x i1> [[TMP1]], <16 x i8> splat (i8 127), <16 x i8> [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> [[A]], <16 x i8> [[TMP3]]
// CHECK-NEXT: ret <16 x i8> [[TMP4]]
//
int8x16_t test_vqabsq_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vqabsq(a);
#else /* POLYMORPHIC */
    return vqabsq_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqabsq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = icmp eq <8 x i16> [[A]], splat (i16 -32768)
// CHECK-NEXT: [[TMP2:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP3:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> splat (i16 32767), <8 x i16> [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[A]], <8 x i16> [[TMP3]]
// CHECK-NEXT: ret <8 x i16> [[TMP4]]
//
int16x8_t test_vqabsq_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vqabsq(a);
#else /* POLYMORPHIC */
    return vqabsq_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqabsq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], zeroinitializer
// CHECK-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[A]], splat (i32 -2147483648)
// CHECK-NEXT: [[TMP2:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> splat (i32 2147483647), <4 x i32> [[TMP2]]
// CHECK-NEXT: [[TMP4:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> [[A]], <4 x i32> [[TMP3]]
// CHECK-NEXT: ret <4 x i32> [[TMP4]]
//
int32x4_t test_vqabsq_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vqabsq(a);
#else /* POLYMORPHIC */
    return vqabsq_s32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqnegq_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], splat (i8 -128)
// CHECK-NEXT: [[TMP1:%.*]] = sub <16 x i8> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <16 x i1> [[TMP0]], <16 x i8> splat (i8 127), <16 x i8> [[TMP1]]
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqnegq_s8(int8x16_t a)
{
#ifdef POLYMORPHIC
    return vqnegq(a);
#else /* POLYMORPHIC */
    return vqnegq_s8(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqnegq_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], splat (i16 -32768)
// CHECK-NEXT: [[TMP1:%.*]] = sub <8 x i16> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> splat (i16 32767), <8 x i16> [[TMP1]]
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqnegq_s16(int16x8_t a)
{
#ifdef POLYMORPHIC
    return vqnegq(a);
#else /* POLYMORPHIC */
    return vqnegq_s16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqnegq_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], splat (i32 -2147483648)
// CHECK-NEXT: [[TMP1:%.*]] = sub <4 x i32> zeroinitializer, [[A]]
// CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP0]], <4 x i32> splat (i32 2147483647), <4 x i32> [[TMP1]]
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqnegq_s32(int32x4_t a)
{
#ifdef POLYMORPHIC
    return vqnegq(a);
#else /* POLYMORPHIC */
    return vqnegq_s32(a);
#endif /* POLYMORPHIC */
}

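// The predicated forms of vnegq and vabsq follow, using the neg/abs
// predicated intrinsics with the same predicate-conversion pattern as
// the vmvnq_m tests above.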
// CHECK-LABEL: @test_vnegq_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vnegq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vnegq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_m_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vnegq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vnegq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vnegq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vnegq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vnegq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_x_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.neg.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vnegq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_x(a, p);
#else /* POLYMORPHIC */
    return vnegq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_x_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.neg.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vnegq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_x(a, p);
#else /* POLYMORPHIC */
    return vnegq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.neg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vnegq_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_x(a, p);
#else /* POLYMORPHIC */
    return vnegq_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.neg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vnegq_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_x(a, p);
#else /* POLYMORPHIC */
    return vnegq_x_s16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vnegq_x_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.neg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vnegq_x_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vnegq_x(a, p);
#else /* POLYMORPHIC */
    return vnegq_x_s32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_m_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vabsq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vabsq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_m_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vabsq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vabsq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vabsq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vabsq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vabsq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_x_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.abs.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT: ret <8 x half> [[TMP2]]
//
float16x8_t test_vabsq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_x_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.abs.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float32x4_t test_vabsq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_x_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.abs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vabsq_x_s8(int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s8(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_x_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.abs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vabsq_x_s16(int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vabsq_x_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.abs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vabsq_x_s32(int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vabsq_x(a, p);
#else /* POLYMORPHIC */
    return vabsq_x_s32(a, p);
#endif /* POLYMORPHIC */
}

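// Only the merging (_m) predicated forms of the saturating operations are
// exercised below, via the qneg/qabs predicated intrinsics.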
// CHECK-LABEL: @test_vqnegq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qneg.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqnegq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqnegq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qneg.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqnegq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqnegq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qneg.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqnegq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqnegq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqnegq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqabsq_m_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.qabs.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <16 x i8> [[TMP2]]
//
int8x16_t test_vqabsq_m_s8(int8x16_t inactive, int8x16_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s8(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqabsq_m_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.qabs.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <8 x i16> [[TMP2]]
//
int16x8_t test_vqabsq_m_s16(int16x8_t inactive, int16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vqabsq_m_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.qabs.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT: ret <4 x i32> [[TMP2]]
//
int32x4_t test_vqabsq_m_s32(int32x4_t inactive, int32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vqabsq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vqabsq_m_s32(inactive, a, p);
#endif /* POLYMORPHIC */
}