// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s
// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vcmpeqq_f16(
11 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
12 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
13 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
14 // CHECK-NEXT: ret i16 [[TMP2]]
16 mve_pred16_t
test_vcmpeqq_f16(float16x8_t a
, float16x8_t b
)
20 #else /* POLYMORPHIC */
21 return vcmpeqq_f16(a
, b
);
22 #endif /* POLYMORPHIC */
25 // CHECK-LABEL: @test_vcmpeqq_f32(
27 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]]
28 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
29 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
30 // CHECK-NEXT: ret i16 [[TMP2]]
32 mve_pred16_t
test_vcmpeqq_f32(float32x4_t a
, float32x4_t b
)
36 #else /* POLYMORPHIC */
37 return vcmpeqq_f32(a
, b
);
38 #endif /* POLYMORPHIC */
41 // CHECK-LABEL: @test_vcmpeqq_s8(
43 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
44 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
45 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
46 // CHECK-NEXT: ret i16 [[TMP2]]
48 mve_pred16_t
test_vcmpeqq_s8(int8x16_t a
, int8x16_t b
)
52 #else /* POLYMORPHIC */
53 return vcmpeqq_s8(a
, b
);
54 #endif /* POLYMORPHIC */
57 // CHECK-LABEL: @test_vcmpeqq_s16(
59 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
60 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
61 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
62 // CHECK-NEXT: ret i16 [[TMP2]]
64 mve_pred16_t
test_vcmpeqq_s16(int16x8_t a
, int16x8_t b
)
68 #else /* POLYMORPHIC */
69 return vcmpeqq_s16(a
, b
);
70 #endif /* POLYMORPHIC */
73 // CHECK-LABEL: @test_vcmpeqq_s32(
75 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
76 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
77 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
78 // CHECK-NEXT: ret i16 [[TMP2]]
80 mve_pred16_t
test_vcmpeqq_s32(int32x4_t a
, int32x4_t b
)
84 #else /* POLYMORPHIC */
85 return vcmpeqq_s32(a
, b
);
86 #endif /* POLYMORPHIC */
89 // CHECK-LABEL: @test_vcmpeqq_u8(
91 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
92 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
93 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
94 // CHECK-NEXT: ret i16 [[TMP2]]
96 mve_pred16_t
test_vcmpeqq_u8(uint8x16_t a
, uint8x16_t b
)
100 #else /* POLYMORPHIC */
101 return vcmpeqq_u8(a
, b
);
102 #endif /* POLYMORPHIC */
105 // CHECK-LABEL: @test_vcmpeqq_u16(
106 // CHECK-NEXT: entry:
107 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
108 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
109 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
110 // CHECK-NEXT: ret i16 [[TMP2]]
112 mve_pred16_t
test_vcmpeqq_u16(uint16x8_t a
, uint16x8_t b
)
115 return vcmpeqq(a
, b
);
116 #else /* POLYMORPHIC */
117 return vcmpeqq_u16(a
, b
);
118 #endif /* POLYMORPHIC */
121 // CHECK-LABEL: @test_vcmpeqq_u32(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
124 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
125 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
126 // CHECK-NEXT: ret i16 [[TMP2]]
128 mve_pred16_t
test_vcmpeqq_u32(uint32x4_t a
, uint32x4_t b
)
131 return vcmpeqq(a
, b
);
132 #else /* POLYMORPHIC */
133 return vcmpeqq_u32(a
, b
);
134 #endif /* POLYMORPHIC */
137 // CHECK-LABEL: @test_vcmpeqq_n_f16(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
140 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
141 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[DOTSPLAT]]
142 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
143 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
144 // CHECK-NEXT: ret i16 [[TMP2]]
146 mve_pred16_t
test_vcmpeqq_n_f16(float16x8_t a
, float16_t b
)
149 return vcmpeqq(a
, b
);
150 #else /* POLYMORPHIC */
151 return vcmpeqq_n_f16(a
, b
);
152 #endif /* POLYMORPHIC */
155 // CHECK-LABEL: @test_vcmpeqq_n_f32(
156 // CHECK-NEXT: entry:
157 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
158 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
159 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[DOTSPLAT]]
160 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
161 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
162 // CHECK-NEXT: ret i16 [[TMP2]]
164 mve_pred16_t
test_vcmpeqq_n_f32(float32x4_t a
, float32_t b
)
167 return vcmpeqq(a
, b
);
168 #else /* POLYMORPHIC */
169 return vcmpeqq_n_f32(a
, b
);
170 #endif /* POLYMORPHIC */
173 // CHECK-LABEL: @test_vcmpeqq_n_s8(
174 // CHECK-NEXT: entry:
175 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
176 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
177 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
178 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
179 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
180 // CHECK-NEXT: ret i16 [[TMP2]]
182 mve_pred16_t
test_vcmpeqq_n_s8(int8x16_t a
, int8_t b
)
185 return vcmpeqq(a
, b
);
186 #else /* POLYMORPHIC */
187 return vcmpeqq_n_s8(a
, b
);
188 #endif /* POLYMORPHIC */
191 // CHECK-LABEL: @test_vcmpeqq_n_s16(
192 // CHECK-NEXT: entry:
193 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
194 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
195 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
196 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
197 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
198 // CHECK-NEXT: ret i16 [[TMP2]]
200 mve_pred16_t
test_vcmpeqq_n_s16(int16x8_t a
, int16_t b
)
203 return vcmpeqq(a
, b
);
204 #else /* POLYMORPHIC */
205 return vcmpeqq_n_s16(a
, b
);
206 #endif /* POLYMORPHIC */
209 // CHECK-LABEL: @test_vcmpeqq_n_s32(
210 // CHECK-NEXT: entry:
211 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
212 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
213 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
214 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
215 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
216 // CHECK-NEXT: ret i16 [[TMP2]]
218 mve_pred16_t
test_vcmpeqq_n_s32(int32x4_t a
, int32_t b
)
221 return vcmpeqq(a
, b
);
222 #else /* POLYMORPHIC */
223 return vcmpeqq_n_s32(a
, b
);
224 #endif /* POLYMORPHIC */
227 // CHECK-LABEL: @test_vcmpeqq_n_u8(
228 // CHECK-NEXT: entry:
229 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
230 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
231 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
232 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
233 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
234 // CHECK-NEXT: ret i16 [[TMP2]]
236 mve_pred16_t
test_vcmpeqq_n_u8(uint8x16_t a
, uint8_t b
)
239 return vcmpeqq(a
, b
);
240 #else /* POLYMORPHIC */
241 return vcmpeqq_n_u8(a
, b
);
242 #endif /* POLYMORPHIC */
245 // CHECK-LABEL: @test_vcmpeqq_n_u16(
246 // CHECK-NEXT: entry:
247 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
248 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
249 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
250 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
251 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
252 // CHECK-NEXT: ret i16 [[TMP2]]
254 mve_pred16_t
test_vcmpeqq_n_u16(uint16x8_t a
, uint16_t b
)
257 return vcmpeqq(a
, b
);
258 #else /* POLYMORPHIC */
259 return vcmpeqq_n_u16(a
, b
);
260 #endif /* POLYMORPHIC */
263 // CHECK-LABEL: @test_vcmpeqq_n_u32(
264 // CHECK-NEXT: entry:
265 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
266 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
267 // CHECK-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
268 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
269 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
270 // CHECK-NEXT: ret i16 [[TMP2]]
272 mve_pred16_t
test_vcmpeqq_n_u32(uint32x4_t a
, uint32_t b
)
275 return vcmpeqq(a
, b
);
276 #else /* POLYMORPHIC */
277 return vcmpeqq_n_u32(a
, b
);
278 #endif /* POLYMORPHIC */
281 // CHECK-LABEL: @test_vcmpeqq_m_f16(
282 // CHECK-NEXT: entry:
283 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
284 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
285 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[B:%.*]]
286 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
287 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
288 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
289 // CHECK-NEXT: ret i16 [[TMP5]]
291 mve_pred16_t
test_vcmpeqq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
294 return vcmpeqq_m(a
, b
, p
);
295 #else /* POLYMORPHIC */
296 return vcmpeqq_m_f16(a
, b
, p
);
297 #endif /* POLYMORPHIC */
300 // CHECK-LABEL: @test_vcmpeqq_m_f32(
301 // CHECK-NEXT: entry:
302 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
303 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
304 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[B:%.*]]
305 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
306 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
307 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
308 // CHECK-NEXT: ret i16 [[TMP5]]
310 mve_pred16_t
test_vcmpeqq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
313 return vcmpeqq_m(a
, b
, p
);
314 #else /* POLYMORPHIC */
315 return vcmpeqq_m_f32(a
, b
, p
);
316 #endif /* POLYMORPHIC */
319 // CHECK-LABEL: @test_vcmpeqq_m_s8(
320 // CHECK-NEXT: entry:
321 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
322 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
323 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
324 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
325 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
326 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
327 // CHECK-NEXT: ret i16 [[TMP5]]
329 mve_pred16_t
test_vcmpeqq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
332 return vcmpeqq_m(a
, b
, p
);
333 #else /* POLYMORPHIC */
334 return vcmpeqq_m_s8(a
, b
, p
);
335 #endif /* POLYMORPHIC */
338 // CHECK-LABEL: @test_vcmpeqq_m_s16(
339 // CHECK-NEXT: entry:
340 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
341 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
342 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
343 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
344 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
345 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
346 // CHECK-NEXT: ret i16 [[TMP5]]
348 mve_pred16_t
test_vcmpeqq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
351 return vcmpeqq_m(a
, b
, p
);
352 #else /* POLYMORPHIC */
353 return vcmpeqq_m_s16(a
, b
, p
);
354 #endif /* POLYMORPHIC */
357 // CHECK-LABEL: @test_vcmpeqq_m_s32(
358 // CHECK-NEXT: entry:
359 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
360 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
361 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
362 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
363 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
364 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
365 // CHECK-NEXT: ret i16 [[TMP5]]
367 mve_pred16_t
test_vcmpeqq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
370 return vcmpeqq_m(a
, b
, p
);
371 #else /* POLYMORPHIC */
372 return vcmpeqq_m_s32(a
, b
, p
);
373 #endif /* POLYMORPHIC */
376 // CHECK-LABEL: @test_vcmpeqq_m_u8(
377 // CHECK-NEXT: entry:
378 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
379 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
380 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[B:%.*]]
381 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
382 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
383 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
384 // CHECK-NEXT: ret i16 [[TMP5]]
386 mve_pred16_t
test_vcmpeqq_m_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
389 return vcmpeqq_m(a
, b
, p
);
390 #else /* POLYMORPHIC */
391 return vcmpeqq_m_u8(a
, b
, p
);
392 #endif /* POLYMORPHIC */
395 // CHECK-LABEL: @test_vcmpeqq_m_u16(
396 // CHECK-NEXT: entry:
397 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
398 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
399 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[B:%.*]]
400 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
401 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
402 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
403 // CHECK-NEXT: ret i16 [[TMP5]]
405 mve_pred16_t
test_vcmpeqq_m_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
408 return vcmpeqq_m(a
, b
, p
);
409 #else /* POLYMORPHIC */
410 return vcmpeqq_m_u16(a
, b
, p
);
411 #endif /* POLYMORPHIC */
414 // CHECK-LABEL: @test_vcmpeqq_m_u32(
415 // CHECK-NEXT: entry:
416 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
417 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
418 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[B:%.*]]
419 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
420 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
421 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
422 // CHECK-NEXT: ret i16 [[TMP5]]
424 mve_pred16_t
test_vcmpeqq_m_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
427 return vcmpeqq_m(a
, b
, p
);
428 #else /* POLYMORPHIC */
429 return vcmpeqq_m_u32(a
, b
, p
);
430 #endif /* POLYMORPHIC */
433 // CHECK-LABEL: @test_vcmpeqq_m_n_f16(
434 // CHECK-NEXT: entry:
435 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
436 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
437 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
438 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
439 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <8 x half> [[A:%.*]], [[DOTSPLAT]]
440 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
441 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
442 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
443 // CHECK-NEXT: ret i16 [[TMP5]]
445 mve_pred16_t
test_vcmpeqq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
448 return vcmpeqq_m(a
, b
, p
);
449 #else /* POLYMORPHIC */
450 return vcmpeqq_m_n_f16(a
, b
, p
);
451 #endif /* POLYMORPHIC */
454 // CHECK-LABEL: @test_vcmpeqq_m_n_f32(
455 // CHECK-NEXT: entry:
456 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
457 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
458 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
459 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
460 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[A:%.*]], [[DOTSPLAT]]
461 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
462 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
463 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
464 // CHECK-NEXT: ret i16 [[TMP5]]
466 mve_pred16_t
test_vcmpeqq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
469 return vcmpeqq_m(a
, b
, p
);
470 #else /* POLYMORPHIC */
471 return vcmpeqq_m_n_f32(a
, b
, p
);
472 #endif /* POLYMORPHIC */
475 // CHECK-LABEL: @test_vcmpeqq_m_n_s8(
476 // CHECK-NEXT: entry:
477 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
478 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
479 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
480 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
481 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
482 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
483 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
484 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
485 // CHECK-NEXT: ret i16 [[TMP5]]
487 mve_pred16_t
test_vcmpeqq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
490 return vcmpeqq_m(a
, b
, p
);
491 #else /* POLYMORPHIC */
492 return vcmpeqq_m_n_s8(a
, b
, p
);
493 #endif /* POLYMORPHIC */
496 // CHECK-LABEL: @test_vcmpeqq_m_n_s16(
497 // CHECK-NEXT: entry:
498 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
499 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
500 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
501 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
502 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
503 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
504 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
505 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
506 // CHECK-NEXT: ret i16 [[TMP5]]
508 mve_pred16_t
test_vcmpeqq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
511 return vcmpeqq_m(a
, b
, p
);
512 #else /* POLYMORPHIC */
513 return vcmpeqq_m_n_s16(a
, b
, p
);
514 #endif /* POLYMORPHIC */
517 // CHECK-LABEL: @test_vcmpeqq_m_n_s32(
518 // CHECK-NEXT: entry:
519 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
520 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
521 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
522 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
523 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
524 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
525 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
526 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
527 // CHECK-NEXT: ret i16 [[TMP5]]
529 mve_pred16_t
test_vcmpeqq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
532 return vcmpeqq_m(a
, b
, p
);
533 #else /* POLYMORPHIC */
534 return vcmpeqq_m_n_s32(a
, b
, p
);
535 #endif /* POLYMORPHIC */
538 // CHECK-LABEL: @test_vcmpeqq_m_n_u8(
539 // CHECK-NEXT: entry:
540 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
541 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
542 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
543 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
544 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <16 x i8> [[A:%.*]], [[DOTSPLAT]]
545 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
546 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
547 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
548 // CHECK-NEXT: ret i16 [[TMP5]]
550 mve_pred16_t
test_vcmpeqq_m_n_u8(uint8x16_t a
, uint8_t b
, mve_pred16_t p
)
553 return vcmpeqq_m(a
, b
, p
);
554 #else /* POLYMORPHIC */
555 return vcmpeqq_m_n_u8(a
, b
, p
);
556 #endif /* POLYMORPHIC */
559 // CHECK-LABEL: @test_vcmpeqq_m_n_u16(
560 // CHECK-NEXT: entry:
561 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
562 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
563 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
564 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
565 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <8 x i16> [[A:%.*]], [[DOTSPLAT]]
566 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
567 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
568 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
569 // CHECK-NEXT: ret i16 [[TMP5]]
571 mve_pred16_t
test_vcmpeqq_m_n_u16(uint16x8_t a
, uint16_t b
, mve_pred16_t p
)
574 return vcmpeqq_m(a
, b
, p
);
575 #else /* POLYMORPHIC */
576 return vcmpeqq_m_n_u16(a
, b
, p
);
577 #endif /* POLYMORPHIC */
580 // CHECK-LABEL: @test_vcmpeqq_m_n_u32(
581 // CHECK-NEXT: entry:
582 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
583 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
584 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
585 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
586 // CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[A:%.*]], [[DOTSPLAT]]
587 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
588 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
589 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
590 // CHECK-NEXT: ret i16 [[TMP5]]
592 mve_pred16_t
test_vcmpeqq_m_n_u32(uint32x4_t a
, uint32_t b
, mve_pred16_t p
)
595 return vcmpeqq_m(a
, b
, p
);
596 #else /* POLYMORPHIC */
597 return vcmpeqq_m_n_u32(a
, b
, p
);
598 #endif /* POLYMORPHIC */
601 // CHECK-LABEL: @test_vcmpneq_f16(
602 // CHECK-NEXT: entry:
603 // CHECK-NEXT: [[TMP0:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]]
604 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
605 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
606 // CHECK-NEXT: ret i16 [[TMP2]]
608 mve_pred16_t
test_vcmpneq_f16(float16x8_t a
, float16x8_t b
)
611 return vcmpneq(a
, b
);
612 #else /* POLYMORPHIC */
613 return vcmpneq_f16(a
, b
);
614 #endif /* POLYMORPHIC */
617 // CHECK-LABEL: @test_vcmpneq_f32(
618 // CHECK-NEXT: entry:
619 // CHECK-NEXT: [[TMP0:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]]
620 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
621 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
622 // CHECK-NEXT: ret i16 [[TMP2]]
624 mve_pred16_t
test_vcmpneq_f32(float32x4_t a
, float32x4_t b
)
627 return vcmpneq(a
, b
);
628 #else /* POLYMORPHIC */
629 return vcmpneq_f32(a
, b
);
630 #endif /* POLYMORPHIC */
633 // CHECK-LABEL: @test_vcmpneq_s8(
634 // CHECK-NEXT: entry:
635 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
636 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
637 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
638 // CHECK-NEXT: ret i16 [[TMP2]]
640 mve_pred16_t
test_vcmpneq_s8(int8x16_t a
, int8x16_t b
)
643 return vcmpneq(a
, b
);
644 #else /* POLYMORPHIC */
645 return vcmpneq_s8(a
, b
);
646 #endif /* POLYMORPHIC */
649 // CHECK-LABEL: @test_vcmpneq_s16(
650 // CHECK-NEXT: entry:
651 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
652 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
653 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
654 // CHECK-NEXT: ret i16 [[TMP2]]
656 mve_pred16_t
test_vcmpneq_s16(int16x8_t a
, int16x8_t b
)
659 return vcmpneq(a
, b
);
660 #else /* POLYMORPHIC */
661 return vcmpneq_s16(a
, b
);
662 #endif /* POLYMORPHIC */
665 // CHECK-LABEL: @test_vcmpneq_s32(
666 // CHECK-NEXT: entry:
667 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
668 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
669 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
670 // CHECK-NEXT: ret i16 [[TMP2]]
672 mve_pred16_t
test_vcmpneq_s32(int32x4_t a
, int32x4_t b
)
675 return vcmpneq(a
, b
);
676 #else /* POLYMORPHIC */
677 return vcmpneq_s32(a
, b
);
678 #endif /* POLYMORPHIC */
681 // CHECK-LABEL: @test_vcmpneq_u8(
682 // CHECK-NEXT: entry:
683 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
684 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
685 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
686 // CHECK-NEXT: ret i16 [[TMP2]]
688 mve_pred16_t
test_vcmpneq_u8(uint8x16_t a
, uint8x16_t b
)
691 return vcmpneq(a
, b
);
692 #else /* POLYMORPHIC */
693 return vcmpneq_u8(a
, b
);
694 #endif /* POLYMORPHIC */
697 // CHECK-LABEL: @test_vcmpneq_u16(
698 // CHECK-NEXT: entry:
699 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
700 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
701 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
702 // CHECK-NEXT: ret i16 [[TMP2]]
704 mve_pred16_t
test_vcmpneq_u16(uint16x8_t a
, uint16x8_t b
)
707 return vcmpneq(a
, b
);
708 #else /* POLYMORPHIC */
709 return vcmpneq_u16(a
, b
);
710 #endif /* POLYMORPHIC */
713 // CHECK-LABEL: @test_vcmpneq_u32(
714 // CHECK-NEXT: entry:
715 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
716 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
717 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
718 // CHECK-NEXT: ret i16 [[TMP2]]
720 mve_pred16_t
test_vcmpneq_u32(uint32x4_t a
, uint32x4_t b
)
723 return vcmpneq(a
, b
);
724 #else /* POLYMORPHIC */
725 return vcmpneq_u32(a
, b
);
726 #endif /* POLYMORPHIC */
729 // CHECK-LABEL: @test_vcmpneq_n_f16(
730 // CHECK-NEXT: entry:
731 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
732 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
733 // CHECK-NEXT: [[TMP0:%.*]] = fcmp une <8 x half> [[A:%.*]], [[DOTSPLAT]]
734 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
735 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
736 // CHECK-NEXT: ret i16 [[TMP2]]
738 mve_pred16_t
test_vcmpneq_n_f16(float16x8_t a
, float16_t b
)
741 return vcmpneq(a
, b
);
742 #else /* POLYMORPHIC */
743 return vcmpneq_n_f16(a
, b
);
744 #endif /* POLYMORPHIC */
747 // CHECK-LABEL: @test_vcmpneq_n_f32(
748 // CHECK-NEXT: entry:
749 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
750 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
751 // CHECK-NEXT: [[TMP0:%.*]] = fcmp une <4 x float> [[A:%.*]], [[DOTSPLAT]]
752 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
753 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
754 // CHECK-NEXT: ret i16 [[TMP2]]
756 mve_pred16_t
test_vcmpneq_n_f32(float32x4_t a
, float32_t b
)
759 return vcmpneq(a
, b
);
760 #else /* POLYMORPHIC */
761 return vcmpneq_n_f32(a
, b
);
762 #endif /* POLYMORPHIC */
765 // CHECK-LABEL: @test_vcmpneq_n_s8(
766 // CHECK-NEXT: entry:
767 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
768 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
769 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
770 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
771 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
772 // CHECK-NEXT: ret i16 [[TMP2]]
774 mve_pred16_t
test_vcmpneq_n_s8(int8x16_t a
, int8_t b
)
777 return vcmpneq(a
, b
);
778 #else /* POLYMORPHIC */
779 return vcmpneq_n_s8(a
, b
);
780 #endif /* POLYMORPHIC */
783 // CHECK-LABEL: @test_vcmpneq_n_s16(
784 // CHECK-NEXT: entry:
785 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
786 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
787 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
788 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
789 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
790 // CHECK-NEXT: ret i16 [[TMP2]]
792 mve_pred16_t
test_vcmpneq_n_s16(int16x8_t a
, int16_t b
)
795 return vcmpneq(a
, b
);
796 #else /* POLYMORPHIC */
797 return vcmpneq_n_s16(a
, b
);
798 #endif /* POLYMORPHIC */
801 // CHECK-LABEL: @test_vcmpneq_n_s32(
802 // CHECK-NEXT: entry:
803 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
804 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
805 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
806 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
807 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
808 // CHECK-NEXT: ret i16 [[TMP2]]
810 mve_pred16_t
test_vcmpneq_n_s32(int32x4_t a
, int32_t b
)
813 return vcmpneq(a
, b
);
814 #else /* POLYMORPHIC */
815 return vcmpneq_n_s32(a
, b
);
816 #endif /* POLYMORPHIC */
819 // CHECK-LABEL: @test_vcmpneq_n_u8(
820 // CHECK-NEXT: entry:
821 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
822 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
823 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
824 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
825 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
826 // CHECK-NEXT: ret i16 [[TMP2]]
828 mve_pred16_t
test_vcmpneq_n_u8(uint8x16_t a
, uint8_t b
)
831 return vcmpneq(a
, b
);
832 #else /* POLYMORPHIC */
833 return vcmpneq_n_u8(a
, b
);
834 #endif /* POLYMORPHIC */
837 // CHECK-LABEL: @test_vcmpneq_n_u16(
838 // CHECK-NEXT: entry:
839 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
840 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
841 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
842 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
843 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
844 // CHECK-NEXT: ret i16 [[TMP2]]
846 mve_pred16_t
test_vcmpneq_n_u16(uint16x8_t a
, uint16_t b
)
849 return vcmpneq(a
, b
);
850 #else /* POLYMORPHIC */
851 return vcmpneq_n_u16(a
, b
);
852 #endif /* POLYMORPHIC */
855 // CHECK-LABEL: @test_vcmpneq_n_u32(
856 // CHECK-NEXT: entry:
857 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
858 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
859 // CHECK-NEXT: [[TMP0:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
860 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
861 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
862 // CHECK-NEXT: ret i16 [[TMP2]]
864 mve_pred16_t
test_vcmpneq_n_u32(uint32x4_t a
, uint32_t b
)
867 return vcmpneq(a
, b
);
868 #else /* POLYMORPHIC */
869 return vcmpneq_n_u32(a
, b
);
870 #endif /* POLYMORPHIC */
873 // CHECK-LABEL: @test_vcmpneq_m_f16(
874 // CHECK-NEXT: entry:
875 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
876 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
877 // CHECK-NEXT: [[TMP2:%.*]] = fcmp une <8 x half> [[A:%.*]], [[B:%.*]]
878 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
879 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
880 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
881 // CHECK-NEXT: ret i16 [[TMP5]]
883 mve_pred16_t
test_vcmpneq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
886 return vcmpneq_m(a
, b
, p
);
887 #else /* POLYMORPHIC */
888 return vcmpneq_m_f16(a
, b
, p
);
889 #endif /* POLYMORPHIC */
892 // CHECK-LABEL: @test_vcmpneq_m_f32(
893 // CHECK-NEXT: entry:
894 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
895 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
896 // CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[B:%.*]]
897 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
898 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
899 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
900 // CHECK-NEXT: ret i16 [[TMP5]]
902 mve_pred16_t
test_vcmpneq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
905 return vcmpneq_m(a
, b
, p
);
906 #else /* POLYMORPHIC */
907 return vcmpneq_m_f32(a
, b
, p
);
908 #endif /* POLYMORPHIC */
911 // CHECK-LABEL: @test_vcmpneq_m_s8(
912 // CHECK-NEXT: entry:
913 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
914 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
915 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
916 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
917 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
918 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
919 // CHECK-NEXT: ret i16 [[TMP5]]
921 mve_pred16_t
test_vcmpneq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
924 return vcmpneq_m(a
, b
, p
);
925 #else /* POLYMORPHIC */
926 return vcmpneq_m_s8(a
, b
, p
);
927 #endif /* POLYMORPHIC */
930 // CHECK-LABEL: @test_vcmpneq_m_s16(
931 // CHECK-NEXT: entry:
932 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
933 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
934 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
935 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
936 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
937 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
938 // CHECK-NEXT: ret i16 [[TMP5]]
940 mve_pred16_t
test_vcmpneq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
943 return vcmpneq_m(a
, b
, p
);
944 #else /* POLYMORPHIC */
945 return vcmpneq_m_s16(a
, b
, p
);
946 #endif /* POLYMORPHIC */
949 // CHECK-LABEL: @test_vcmpneq_m_s32(
950 // CHECK-NEXT: entry:
951 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
952 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
953 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
954 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
955 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
956 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
957 // CHECK-NEXT: ret i16 [[TMP5]]
959 mve_pred16_t
test_vcmpneq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
962 return vcmpneq_m(a
, b
, p
);
963 #else /* POLYMORPHIC */
964 return vcmpneq_m_s32(a
, b
, p
);
965 #endif /* POLYMORPHIC */
968 // CHECK-LABEL: @test_vcmpneq_m_u8(
969 // CHECK-NEXT: entry:
970 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
971 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
972 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[B:%.*]]
973 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
974 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
975 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
976 // CHECK-NEXT: ret i16 [[TMP5]]
978 mve_pred16_t
test_vcmpneq_m_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
981 return vcmpneq_m(a
, b
, p
);
982 #else /* POLYMORPHIC */
983 return vcmpneq_m_u8(a
, b
, p
);
984 #endif /* POLYMORPHIC */
987 // CHECK-LABEL: @test_vcmpneq_m_u16(
988 // CHECK-NEXT: entry:
989 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
990 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
991 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[B:%.*]]
992 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
993 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
994 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
995 // CHECK-NEXT: ret i16 [[TMP5]]
997 mve_pred16_t
test_vcmpneq_m_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
1000 return vcmpneq_m(a
, b
, p
);
1001 #else /* POLYMORPHIC */
1002 return vcmpneq_m_u16(a
, b
, p
);
1003 #endif /* POLYMORPHIC */
1006 // CHECK-LABEL: @test_vcmpneq_m_u32(
1007 // CHECK-NEXT: entry:
1008 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1009 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1010 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[B:%.*]]
1011 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1012 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1013 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1014 // CHECK-NEXT: ret i16 [[TMP5]]
1016 mve_pred16_t
test_vcmpneq_m_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
1019 return vcmpneq_m(a
, b
, p
);
1020 #else /* POLYMORPHIC */
1021 return vcmpneq_m_u32(a
, b
, p
);
1022 #endif /* POLYMORPHIC */
1025 // CHECK-LABEL: @test_vcmpneq_m_n_f16(
1026 // CHECK-NEXT: entry:
1027 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1028 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1029 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
1030 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
1031 // CHECK-NEXT: [[TMP2:%.*]] = fcmp une <8 x half> [[A:%.*]], [[DOTSPLAT]]
1032 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1033 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1034 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1035 // CHECK-NEXT: ret i16 [[TMP5]]
1037 mve_pred16_t
test_vcmpneq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
1040 return vcmpneq_m(a
, b
, p
);
1041 #else /* POLYMORPHIC */
1042 return vcmpneq_m_n_f16(a
, b
, p
);
1043 #endif /* POLYMORPHIC */
1046 // CHECK-LABEL: @test_vcmpneq_m_n_f32(
1047 // CHECK-NEXT: entry:
1048 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1049 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1050 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
1051 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
1052 // CHECK-NEXT: [[TMP2:%.*]] = fcmp une <4 x float> [[A:%.*]], [[DOTSPLAT]]
1053 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1054 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1055 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1056 // CHECK-NEXT: ret i16 [[TMP5]]
1058 mve_pred16_t
test_vcmpneq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
1061 return vcmpneq_m(a
, b
, p
);
1062 #else /* POLYMORPHIC */
1063 return vcmpneq_m_n_f32(a
, b
, p
);
1064 #endif /* POLYMORPHIC */
1067 // CHECK-LABEL: @test_vcmpneq_m_n_s8(
1068 // CHECK-NEXT: entry:
1069 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1070 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1071 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1072 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1073 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1074 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1075 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1076 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1077 // CHECK-NEXT: ret i16 [[TMP5]]
1079 mve_pred16_t
test_vcmpneq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
1082 return vcmpneq_m(a
, b
, p
);
1083 #else /* POLYMORPHIC */
1084 return vcmpneq_m_n_s8(a
, b
, p
);
1085 #endif /* POLYMORPHIC */
1088 // CHECK-LABEL: @test_vcmpneq_m_n_s16(
1089 // CHECK-NEXT: entry:
1090 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1091 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1092 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1093 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1094 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1095 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1096 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1097 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1098 // CHECK-NEXT: ret i16 [[TMP5]]
1100 mve_pred16_t
test_vcmpneq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
1103 return vcmpneq_m(a
, b
, p
);
1104 #else /* POLYMORPHIC */
1105 return vcmpneq_m_n_s16(a
, b
, p
);
1106 #endif /* POLYMORPHIC */
1109 // CHECK-LABEL: @test_vcmpneq_m_n_s32(
1110 // CHECK-NEXT: entry:
1111 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1112 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1113 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1114 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1115 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1116 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1117 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1118 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1119 // CHECK-NEXT: ret i16 [[TMP5]]
1121 mve_pred16_t
test_vcmpneq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
1124 return vcmpneq_m(a
, b
, p
);
1125 #else /* POLYMORPHIC */
1126 return vcmpneq_m_n_s32(a
, b
, p
);
1127 #endif /* POLYMORPHIC */
1130 // CHECK-LABEL: @test_vcmpneq_m_n_u8(
1131 // CHECK-NEXT: entry:
1132 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1133 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1134 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1135 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1136 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1137 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1138 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1139 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1140 // CHECK-NEXT: ret i16 [[TMP5]]
1142 mve_pred16_t
test_vcmpneq_m_n_u8(uint8x16_t a
, uint8_t b
, mve_pred16_t p
)
1145 return vcmpneq_m(a
, b
, p
);
1146 #else /* POLYMORPHIC */
1147 return vcmpneq_m_n_u8(a
, b
, p
);
1148 #endif /* POLYMORPHIC */
1151 // CHECK-LABEL: @test_vcmpneq_m_n_u16(
1152 // CHECK-NEXT: entry:
1153 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1154 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1155 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1156 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1157 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1158 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1159 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1160 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1161 // CHECK-NEXT: ret i16 [[TMP5]]
1163 mve_pred16_t
test_vcmpneq_m_n_u16(uint16x8_t a
, uint16_t b
, mve_pred16_t p
)
1166 return vcmpneq_m(a
, b
, p
);
1167 #else /* POLYMORPHIC */
1168 return vcmpneq_m_n_u16(a
, b
, p
);
1169 #endif /* POLYMORPHIC */
1172 // CHECK-LABEL: @test_vcmpneq_m_n_u32(
1173 // CHECK-NEXT: entry:
1174 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1175 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1176 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1177 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1178 // CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1179 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1180 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1181 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1182 // CHECK-NEXT: ret i16 [[TMP5]]
1184 mve_pred16_t
test_vcmpneq_m_n_u32(uint32x4_t a
, uint32_t b
, mve_pred16_t p
)
1187 return vcmpneq_m(a
, b
, p
);
1188 #else /* POLYMORPHIC */
1189 return vcmpneq_m_n_u32(a
, b
, p
);
1190 #endif /* POLYMORPHIC */
1193 // CHECK-LABEL: @test_vcmpgeq_f16(
1194 // CHECK-NEXT: entry:
1195 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]]
1196 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1197 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1198 // CHECK-NEXT: ret i16 [[TMP2]]
1200 mve_pred16_t
test_vcmpgeq_f16(float16x8_t a
, float16x8_t b
)
1203 return vcmpgeq(a
, b
);
1204 #else /* POLYMORPHIC */
1205 return vcmpgeq_f16(a
, b
);
1206 #endif /* POLYMORPHIC */
1209 // CHECK-LABEL: @test_vcmpgeq_f32(
1210 // CHECK-NEXT: entry:
1211 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]]
1212 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1213 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1214 // CHECK-NEXT: ret i16 [[TMP2]]
1216 mve_pred16_t
test_vcmpgeq_f32(float32x4_t a
, float32x4_t b
)
1219 return vcmpgeq(a
, b
);
1220 #else /* POLYMORPHIC */
1221 return vcmpgeq_f32(a
, b
);
1222 #endif /* POLYMORPHIC */
1225 // CHECK-LABEL: @test_vcmpgeq_s8(
1226 // CHECK-NEXT: entry:
1227 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]]
1228 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1229 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1230 // CHECK-NEXT: ret i16 [[TMP2]]
1232 mve_pred16_t
test_vcmpgeq_s8(int8x16_t a
, int8x16_t b
)
1235 return vcmpgeq(a
, b
);
1236 #else /* POLYMORPHIC */
1237 return vcmpgeq_s8(a
, b
);
1238 #endif /* POLYMORPHIC */
1241 // CHECK-LABEL: @test_vcmpgeq_s16(
1242 // CHECK-NEXT: entry:
1243 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]]
1244 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1245 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1246 // CHECK-NEXT: ret i16 [[TMP2]]
1248 mve_pred16_t
test_vcmpgeq_s16(int16x8_t a
, int16x8_t b
)
1251 return vcmpgeq(a
, b
);
1252 #else /* POLYMORPHIC */
1253 return vcmpgeq_s16(a
, b
);
1254 #endif /* POLYMORPHIC */
1257 // CHECK-LABEL: @test_vcmpgeq_s32(
1258 // CHECK-NEXT: entry:
1259 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]]
1260 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1261 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1262 // CHECK-NEXT: ret i16 [[TMP2]]
1264 mve_pred16_t
test_vcmpgeq_s32(int32x4_t a
, int32x4_t b
)
1267 return vcmpgeq(a
, b
);
1268 #else /* POLYMORPHIC */
1269 return vcmpgeq_s32(a
, b
);
1270 #endif /* POLYMORPHIC */
1273 // CHECK-LABEL: @test_vcmpcsq_u8(
1274 // CHECK-NEXT: entry:
1275 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]]
1276 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1277 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1278 // CHECK-NEXT: ret i16 [[TMP2]]
1280 mve_pred16_t
test_vcmpcsq_u8(uint8x16_t a
, uint8x16_t b
)
1283 return vcmpcsq(a
, b
);
1284 #else /* POLYMORPHIC */
1285 return vcmpcsq_u8(a
, b
);
1286 #endif /* POLYMORPHIC */
1289 // CHECK-LABEL: @test_vcmpcsq_u16(
1290 // CHECK-NEXT: entry:
1291 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]]
1292 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1293 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1294 // CHECK-NEXT: ret i16 [[TMP2]]
1296 mve_pred16_t
test_vcmpcsq_u16(uint16x8_t a
, uint16x8_t b
)
1299 return vcmpcsq(a
, b
);
1300 #else /* POLYMORPHIC */
1301 return vcmpcsq_u16(a
, b
);
1302 #endif /* POLYMORPHIC */
1305 // CHECK-LABEL: @test_vcmpcsq_u32(
1306 // CHECK-NEXT: entry:
1307 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]]
1308 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1309 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1310 // CHECK-NEXT: ret i16 [[TMP2]]
1312 mve_pred16_t
test_vcmpcsq_u32(uint32x4_t a
, uint32x4_t b
)
1315 return vcmpcsq(a
, b
);
1316 #else /* POLYMORPHIC */
1317 return vcmpcsq_u32(a
, b
);
1318 #endif /* POLYMORPHIC */
1321 // CHECK-LABEL: @test_vcmpgeq_n_f16(
1322 // CHECK-NEXT: entry:
1323 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
1324 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
1325 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[DOTSPLAT]]
1326 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1327 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1328 // CHECK-NEXT: ret i16 [[TMP2]]
1330 mve_pred16_t
test_vcmpgeq_n_f16(float16x8_t a
, float16_t b
)
1333 return vcmpgeq(a
, b
);
1334 #else /* POLYMORPHIC */
1335 return vcmpgeq_n_f16(a
, b
);
1336 #endif /* POLYMORPHIC */
1339 // CHECK-LABEL: @test_vcmpgeq_n_f32(
1340 // CHECK-NEXT: entry:
1341 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
1342 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
1343 // CHECK-NEXT: [[TMP0:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[DOTSPLAT]]
1344 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1345 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1346 // CHECK-NEXT: ret i16 [[TMP2]]
1348 mve_pred16_t
test_vcmpgeq_n_f32(float32x4_t a
, float32_t b
)
1351 return vcmpgeq(a
, b
);
1352 #else /* POLYMORPHIC */
1353 return vcmpgeq_n_f32(a
, b
);
1354 #endif /* POLYMORPHIC */
1357 // CHECK-LABEL: @test_vcmpgeq_n_s8(
1358 // CHECK-NEXT: entry:
1359 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1360 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1361 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1362 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1363 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1364 // CHECK-NEXT: ret i16 [[TMP2]]
1366 mve_pred16_t
test_vcmpgeq_n_s8(int8x16_t a
, int8_t b
)
1369 return vcmpgeq(a
, b
);
1370 #else /* POLYMORPHIC */
1371 return vcmpgeq_n_s8(a
, b
);
1372 #endif /* POLYMORPHIC */
1375 // CHECK-LABEL: @test_vcmpgeq_n_s16(
1376 // CHECK-NEXT: entry:
1377 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1378 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1379 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1380 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1381 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1382 // CHECK-NEXT: ret i16 [[TMP2]]
1384 mve_pred16_t
test_vcmpgeq_n_s16(int16x8_t a
, int16_t b
)
1387 return vcmpgeq(a
, b
);
1388 #else /* POLYMORPHIC */
1389 return vcmpgeq_n_s16(a
, b
);
1390 #endif /* POLYMORPHIC */
1393 // CHECK-LABEL: @test_vcmpgeq_n_s32(
1394 // CHECK-NEXT: entry:
1395 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1396 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1397 // CHECK-NEXT: [[TMP0:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1398 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1399 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1400 // CHECK-NEXT: ret i16 [[TMP2]]
1402 mve_pred16_t
test_vcmpgeq_n_s32(int32x4_t a
, int32_t b
)
1405 return vcmpgeq(a
, b
);
1406 #else /* POLYMORPHIC */
1407 return vcmpgeq_n_s32(a
, b
);
1408 #endif /* POLYMORPHIC */
1411 // CHECK-LABEL: @test_vcmpcsq_n_u8(
1412 // CHECK-NEXT: entry:
1413 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1414 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1415 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1416 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1417 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1418 // CHECK-NEXT: ret i16 [[TMP2]]
1420 mve_pred16_t
test_vcmpcsq_n_u8(uint8x16_t a
, uint8_t b
)
1423 return vcmpcsq(a
, b
);
1424 #else /* POLYMORPHIC */
1425 return vcmpcsq_n_u8(a
, b
);
1426 #endif /* POLYMORPHIC */
1429 // CHECK-LABEL: @test_vcmpcsq_n_u16(
1430 // CHECK-NEXT: entry:
1431 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1432 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1433 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1434 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1435 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1436 // CHECK-NEXT: ret i16 [[TMP2]]
1438 mve_pred16_t
test_vcmpcsq_n_u16(uint16x8_t a
, uint16_t b
)
1441 return vcmpcsq(a
, b
);
1442 #else /* POLYMORPHIC */
1443 return vcmpcsq_n_u16(a
, b
);
1444 #endif /* POLYMORPHIC */
1447 // CHECK-LABEL: @test_vcmpcsq_n_u32(
1448 // CHECK-NEXT: entry:
1449 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1450 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1451 // CHECK-NEXT: [[TMP0:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1452 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1453 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1454 // CHECK-NEXT: ret i16 [[TMP2]]
1456 mve_pred16_t
test_vcmpcsq_n_u32(uint32x4_t a
, uint32_t b
)
1459 return vcmpcsq(a
, b
);
1460 #else /* POLYMORPHIC */
1461 return vcmpcsq_n_u32(a
, b
);
1462 #endif /* POLYMORPHIC */
1465 // CHECK-LABEL: @test_vcmpgeq_m_f16(
1466 // CHECK-NEXT: entry:
1467 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1468 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1469 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[B:%.*]]
1470 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1471 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1472 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1473 // CHECK-NEXT: ret i16 [[TMP5]]
1475 mve_pred16_t
test_vcmpgeq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
1478 return vcmpgeq_m(a
, b
, p
);
1479 #else /* POLYMORPHIC */
1480 return vcmpgeq_m_f16(a
, b
, p
);
1481 #endif /* POLYMORPHIC */
1484 // CHECK-LABEL: @test_vcmpgeq_m_f32(
1485 // CHECK-NEXT: entry:
1486 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1487 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1488 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[B:%.*]]
1489 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1490 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1491 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1492 // CHECK-NEXT: ret i16 [[TMP5]]
1494 mve_pred16_t
test_vcmpgeq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
1497 return vcmpgeq_m(a
, b
, p
);
1498 #else /* POLYMORPHIC */
1499 return vcmpgeq_m_f32(a
, b
, p
);
1500 #endif /* POLYMORPHIC */
1503 // CHECK-LABEL: @test_vcmpgeq_m_s8(
1504 // CHECK-NEXT: entry:
1505 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1506 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1507 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[B:%.*]]
1508 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1509 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1510 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1511 // CHECK-NEXT: ret i16 [[TMP5]]
1513 mve_pred16_t
test_vcmpgeq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
1516 return vcmpgeq_m(a
, b
, p
);
1517 #else /* POLYMORPHIC */
1518 return vcmpgeq_m_s8(a
, b
, p
);
1519 #endif /* POLYMORPHIC */
1522 // CHECK-LABEL: @test_vcmpgeq_m_s16(
1523 // CHECK-NEXT: entry:
1524 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1525 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1526 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[B:%.*]]
1527 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1528 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1529 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1530 // CHECK-NEXT: ret i16 [[TMP5]]
1532 mve_pred16_t
test_vcmpgeq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
1535 return vcmpgeq_m(a
, b
, p
);
1536 #else /* POLYMORPHIC */
1537 return vcmpgeq_m_s16(a
, b
, p
);
1538 #endif /* POLYMORPHIC */
1541 // CHECK-LABEL: @test_vcmpgeq_m_s32(
1542 // CHECK-NEXT: entry:
1543 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1544 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1545 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[B:%.*]]
1546 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1547 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1548 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1549 // CHECK-NEXT: ret i16 [[TMP5]]
1551 mve_pred16_t
test_vcmpgeq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
1554 return vcmpgeq_m(a
, b
, p
);
1555 #else /* POLYMORPHIC */
1556 return vcmpgeq_m_s32(a
, b
, p
);
1557 #endif /* POLYMORPHIC */
1560 // CHECK-LABEL: @test_vcmpcsq_m_u8(
1561 // CHECK-NEXT: entry:
1562 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1563 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1564 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[B:%.*]]
1565 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1566 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1567 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1568 // CHECK-NEXT: ret i16 [[TMP5]]
1570 mve_pred16_t
test_vcmpcsq_m_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
1573 return vcmpcsq_m(a
, b
, p
);
1574 #else /* POLYMORPHIC */
1575 return vcmpcsq_m_u8(a
, b
, p
);
1576 #endif /* POLYMORPHIC */
1579 // CHECK-LABEL: @test_vcmpcsq_m_u16(
1580 // CHECK-NEXT: entry:
1581 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1582 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1583 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[B:%.*]]
1584 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1585 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1586 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1587 // CHECK-NEXT: ret i16 [[TMP5]]
1589 mve_pred16_t
test_vcmpcsq_m_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
1592 return vcmpcsq_m(a
, b
, p
);
1593 #else /* POLYMORPHIC */
1594 return vcmpcsq_m_u16(a
, b
, p
);
1595 #endif /* POLYMORPHIC */
1598 // CHECK-LABEL: @test_vcmpcsq_m_u32(
1599 // CHECK-NEXT: entry:
1600 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1601 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1602 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[B:%.*]]
1603 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1604 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1605 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1606 // CHECK-NEXT: ret i16 [[TMP5]]
1608 mve_pred16_t
test_vcmpcsq_m_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
1611 return vcmpcsq_m(a
, b
, p
);
1612 #else /* POLYMORPHIC */
1613 return vcmpcsq_m_u32(a
, b
, p
);
1614 #endif /* POLYMORPHIC */
1617 // CHECK-LABEL: @test_vcmpgeq_m_n_f16(
1618 // CHECK-NEXT: entry:
1619 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1620 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1621 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
1622 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
1623 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <8 x half> [[A:%.*]], [[DOTSPLAT]]
1624 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1625 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1626 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1627 // CHECK-NEXT: ret i16 [[TMP5]]
1629 mve_pred16_t
test_vcmpgeq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
1632 return vcmpgeq_m(a
, b
, p
);
1633 #else /* POLYMORPHIC */
1634 return vcmpgeq_m_n_f16(a
, b
, p
);
1635 #endif /* POLYMORPHIC */
1638 // CHECK-LABEL: @test_vcmpgeq_m_n_f32(
1639 // CHECK-NEXT: entry:
1640 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1641 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1642 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
1643 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
1644 // CHECK-NEXT: [[TMP2:%.*]] = fcmp oge <4 x float> [[A:%.*]], [[DOTSPLAT]]
1645 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1646 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1647 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1648 // CHECK-NEXT: ret i16 [[TMP5]]
1650 mve_pred16_t
test_vcmpgeq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
1653 return vcmpgeq_m(a
, b
, p
);
1654 #else /* POLYMORPHIC */
1655 return vcmpgeq_m_n_f32(a
, b
, p
);
1656 #endif /* POLYMORPHIC */
1659 // CHECK-LABEL: @test_vcmpgeq_m_n_s8(
1660 // CHECK-NEXT: entry:
1661 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1662 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1663 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1664 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1665 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1666 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1667 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1668 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1669 // CHECK-NEXT: ret i16 [[TMP5]]
1671 mve_pred16_t
test_vcmpgeq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
1674 return vcmpgeq_m(a
, b
, p
);
1675 #else /* POLYMORPHIC */
1676 return vcmpgeq_m_n_s8(a
, b
, p
);
1677 #endif /* POLYMORPHIC */
1680 // CHECK-LABEL: @test_vcmpgeq_m_n_s16(
1681 // CHECK-NEXT: entry:
1682 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1683 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1684 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1685 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1686 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1687 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1688 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1689 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1690 // CHECK-NEXT: ret i16 [[TMP5]]
1692 mve_pred16_t
test_vcmpgeq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
1695 return vcmpgeq_m(a
, b
, p
);
1696 #else /* POLYMORPHIC */
1697 return vcmpgeq_m_n_s16(a
, b
, p
);
1698 #endif /* POLYMORPHIC */
1701 // CHECK-LABEL: @test_vcmpgeq_m_n_s32(
1702 // CHECK-NEXT: entry:
1703 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1704 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1705 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1706 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1707 // CHECK-NEXT: [[TMP2:%.*]] = icmp sge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1708 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1709 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1710 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1711 // CHECK-NEXT: ret i16 [[TMP5]]
1713 mve_pred16_t
test_vcmpgeq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
1716 return vcmpgeq_m(a
, b
, p
);
1717 #else /* POLYMORPHIC */
1718 return vcmpgeq_m_n_s32(a
, b
, p
);
1719 #endif /* POLYMORPHIC */
1722 // CHECK-LABEL: @test_vcmpcsq_m_n_u8(
1723 // CHECK-NEXT: entry:
1724 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1725 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
1726 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1727 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1728 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1729 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
1730 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
1731 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1732 // CHECK-NEXT: ret i16 [[TMP5]]
1734 mve_pred16_t
test_vcmpcsq_m_n_u8(uint8x16_t a
, uint8_t b
, mve_pred16_t p
)
1737 return vcmpcsq_m(a
, b
, p
);
1738 #else /* POLYMORPHIC */
1739 return vcmpcsq_m_n_u8(a
, b
, p
);
1740 #endif /* POLYMORPHIC */
1743 // CHECK-LABEL: @test_vcmpcsq_m_n_u16(
1744 // CHECK-NEXT: entry:
1745 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1746 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
1747 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1748 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1749 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1750 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
1751 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
1752 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1753 // CHECK-NEXT: ret i16 [[TMP5]]
1755 mve_pred16_t
test_vcmpcsq_m_n_u16(uint16x8_t a
, uint16_t b
, mve_pred16_t p
)
1758 return vcmpcsq_m(a
, b
, p
);
1759 #else /* POLYMORPHIC */
1760 return vcmpcsq_m_n_u16(a
, b
, p
);
1761 #endif /* POLYMORPHIC */
1764 // CHECK-LABEL: @test_vcmpcsq_m_n_u32(
1765 // CHECK-NEXT: entry:
1766 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
1767 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
1768 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1769 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1770 // CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1771 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
1772 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
1773 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
1774 // CHECK-NEXT: ret i16 [[TMP5]]
1776 mve_pred16_t
test_vcmpcsq_m_n_u32(uint32x4_t a
, uint32_t b
, mve_pred16_t p
)
1779 return vcmpcsq_m(a
, b
, p
);
1780 #else /* POLYMORPHIC */
1781 return vcmpcsq_m_n_u32(a
, b
, p
);
1782 #endif /* POLYMORPHIC */
1785 // CHECK-LABEL: @test_vcmpgtq_f16(
1786 // CHECK-NEXT: entry:
1787 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[B:%.*]]
1788 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1789 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1790 // CHECK-NEXT: ret i16 [[TMP2]]
1792 mve_pred16_t
test_vcmpgtq_f16(float16x8_t a
, float16x8_t b
)
1795 return vcmpgtq(a
, b
);
1796 #else /* POLYMORPHIC */
1797 return vcmpgtq_f16(a
, b
);
1798 #endif /* POLYMORPHIC */
1801 // CHECK-LABEL: @test_vcmpgtq_f32(
1802 // CHECK-NEXT: entry:
1803 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]]
1804 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1805 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1806 // CHECK-NEXT: ret i16 [[TMP2]]
1808 mve_pred16_t
test_vcmpgtq_f32(float32x4_t a
, float32x4_t b
)
1811 return vcmpgtq(a
, b
);
1812 #else /* POLYMORPHIC */
1813 return vcmpgtq_f32(a
, b
);
1814 #endif /* POLYMORPHIC */
1817 // CHECK-LABEL: @test_vcmpgtq_s8(
1818 // CHECK-NEXT: entry:
1819 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]]
1820 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1821 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1822 // CHECK-NEXT: ret i16 [[TMP2]]
1824 mve_pred16_t
test_vcmpgtq_s8(int8x16_t a
, int8x16_t b
)
1827 return vcmpgtq(a
, b
);
1828 #else /* POLYMORPHIC */
1829 return vcmpgtq_s8(a
, b
);
1830 #endif /* POLYMORPHIC */
1833 // CHECK-LABEL: @test_vcmpgtq_s16(
1834 // CHECK-NEXT: entry:
1835 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]]
1836 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1837 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1838 // CHECK-NEXT: ret i16 [[TMP2]]
1840 mve_pred16_t
test_vcmpgtq_s16(int16x8_t a
, int16x8_t b
)
1843 return vcmpgtq(a
, b
);
1844 #else /* POLYMORPHIC */
1845 return vcmpgtq_s16(a
, b
);
1846 #endif /* POLYMORPHIC */
1849 // CHECK-LABEL: @test_vcmpgtq_s32(
1850 // CHECK-NEXT: entry:
1851 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]]
1852 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1853 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1854 // CHECK-NEXT: ret i16 [[TMP2]]
1856 mve_pred16_t
test_vcmpgtq_s32(int32x4_t a
, int32x4_t b
)
1859 return vcmpgtq(a
, b
);
1860 #else /* POLYMORPHIC */
1861 return vcmpgtq_s32(a
, b
);
1862 #endif /* POLYMORPHIC */
1865 // CHECK-LABEL: @test_vcmphiq_u8(
1866 // CHECK-NEXT: entry:
1867 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]]
1868 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1869 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1870 // CHECK-NEXT: ret i16 [[TMP2]]
1872 mve_pred16_t
test_vcmphiq_u8(uint8x16_t a
, uint8x16_t b
)
1875 return vcmphiq(a
, b
);
1876 #else /* POLYMORPHIC */
1877 return vcmphiq_u8(a
, b
);
1878 #endif /* POLYMORPHIC */
1881 // CHECK-LABEL: @test_vcmphiq_u16(
1882 // CHECK-NEXT: entry:
1883 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]]
1884 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1885 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1886 // CHECK-NEXT: ret i16 [[TMP2]]
1888 mve_pred16_t
test_vcmphiq_u16(uint16x8_t a
, uint16x8_t b
)
1891 return vcmphiq(a
, b
);
1892 #else /* POLYMORPHIC */
1893 return vcmphiq_u16(a
, b
);
1894 #endif /* POLYMORPHIC */
1897 // CHECK-LABEL: @test_vcmphiq_u32(
1898 // CHECK-NEXT: entry:
1899 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]]
1900 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1901 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1902 // CHECK-NEXT: ret i16 [[TMP2]]
1904 mve_pred16_t
test_vcmphiq_u32(uint32x4_t a
, uint32x4_t b
)
1907 return vcmphiq(a
, b
);
1908 #else /* POLYMORPHIC */
1909 return vcmphiq_u32(a
, b
);
1910 #endif /* POLYMORPHIC */
1913 // CHECK-LABEL: @test_vcmpgtq_n_f16(
1914 // CHECK-NEXT: entry:
1915 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
1916 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
1917 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[DOTSPLAT]]
1918 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1919 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1920 // CHECK-NEXT: ret i16 [[TMP2]]
1922 mve_pred16_t
test_vcmpgtq_n_f16(float16x8_t a
, float16_t b
)
1925 return vcmpgtq(a
, b
);
1926 #else /* POLYMORPHIC */
1927 return vcmpgtq_n_f16(a
, b
);
1928 #endif /* POLYMORPHIC */
1931 // CHECK-LABEL: @test_vcmpgtq_n_f32(
1932 // CHECK-NEXT: entry:
1933 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
1934 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
1935 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[DOTSPLAT]]
1936 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1937 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1938 // CHECK-NEXT: ret i16 [[TMP2]]
1940 mve_pred16_t
test_vcmpgtq_n_f32(float32x4_t a
, float32_t b
)
1943 return vcmpgtq(a
, b
);
1944 #else /* POLYMORPHIC */
1945 return vcmpgtq_n_f32(a
, b
);
1946 #endif /* POLYMORPHIC */
1949 // CHECK-LABEL: @test_vcmpgtq_n_s8(
1950 // CHECK-NEXT: entry:
1951 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
1952 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
1953 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
1954 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
1955 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1956 // CHECK-NEXT: ret i16 [[TMP2]]
1958 mve_pred16_t
test_vcmpgtq_n_s8(int8x16_t a
, int8_t b
)
1961 return vcmpgtq(a
, b
);
1962 #else /* POLYMORPHIC */
1963 return vcmpgtq_n_s8(a
, b
);
1964 #endif /* POLYMORPHIC */
1967 // CHECK-LABEL: @test_vcmpgtq_n_s16(
1968 // CHECK-NEXT: entry:
1969 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
1970 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
1971 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
1972 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
1973 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1974 // CHECK-NEXT: ret i16 [[TMP2]]
1976 mve_pred16_t
test_vcmpgtq_n_s16(int16x8_t a
, int16_t b
)
1979 return vcmpgtq(a
, b
);
1980 #else /* POLYMORPHIC */
1981 return vcmpgtq_n_s16(a
, b
);
1982 #endif /* POLYMORPHIC */
1985 // CHECK-LABEL: @test_vcmpgtq_n_s32(
1986 // CHECK-NEXT: entry:
1987 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
1988 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
1989 // CHECK-NEXT: [[TMP0:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
1990 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
1991 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
1992 // CHECK-NEXT: ret i16 [[TMP2]]
1994 mve_pred16_t
test_vcmpgtq_n_s32(int32x4_t a
, int32_t b
)
1997 return vcmpgtq(a
, b
);
1998 #else /* POLYMORPHIC */
1999 return vcmpgtq_n_s32(a
, b
);
2000 #endif /* POLYMORPHIC */
2003 // CHECK-LABEL: @test_vcmphiq_n_u8(
2004 // CHECK-NEXT: entry:
2005 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2006 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2007 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2008 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
2009 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2010 // CHECK-NEXT: ret i16 [[TMP2]]
2012 mve_pred16_t
test_vcmphiq_n_u8(uint8x16_t a
, uint8_t b
)
2015 return vcmphiq(a
, b
);
2016 #else /* POLYMORPHIC */
2017 return vcmphiq_n_u8(a
, b
);
2018 #endif /* POLYMORPHIC */
2021 // CHECK-LABEL: @test_vcmphiq_n_u16(
2022 // CHECK-NEXT: entry:
2023 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2024 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2025 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2026 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2027 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2028 // CHECK-NEXT: ret i16 [[TMP2]]
2030 mve_pred16_t
test_vcmphiq_n_u16(uint16x8_t a
, uint16_t b
)
2033 return vcmphiq(a
, b
);
2034 #else /* POLYMORPHIC */
2035 return vcmphiq_n_u16(a
, b
);
2036 #endif /* POLYMORPHIC */
2039 // CHECK-LABEL: @test_vcmphiq_n_u32(
2040 // CHECK-NEXT: entry:
2041 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2042 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2043 // CHECK-NEXT: [[TMP0:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2044 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2045 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2046 // CHECK-NEXT: ret i16 [[TMP2]]
2048 mve_pred16_t
test_vcmphiq_n_u32(uint32x4_t a
, uint32_t b
)
2051 return vcmphiq(a
, b
);
2052 #else /* POLYMORPHIC */
2053 return vcmphiq_n_u32(a
, b
);
2054 #endif /* POLYMORPHIC */
2057 // CHECK-LABEL: @test_vcmpgtq_m_f16(
2058 // CHECK-NEXT: entry:
2059 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2060 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2061 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[B:%.*]]
2062 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2063 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2064 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2065 // CHECK-NEXT: ret i16 [[TMP5]]
2067 mve_pred16_t
test_vcmpgtq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
2070 return vcmpgtq_m(a
, b
, p
);
2071 #else /* POLYMORPHIC */
2072 return vcmpgtq_m_f16(a
, b
, p
);
2073 #endif /* POLYMORPHIC */
2076 // CHECK-LABEL: @test_vcmpgtq_m_f32(
2077 // CHECK-NEXT: entry:
2078 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2079 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2080 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[B:%.*]]
2081 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2082 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2083 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2084 // CHECK-NEXT: ret i16 [[TMP5]]
2086 mve_pred16_t
test_vcmpgtq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
2089 return vcmpgtq_m(a
, b
, p
);
2090 #else /* POLYMORPHIC */
2091 return vcmpgtq_m_f32(a
, b
, p
);
2092 #endif /* POLYMORPHIC */
2095 // CHECK-LABEL: @test_vcmpgtq_m_s8(
2096 // CHECK-NEXT: entry:
2097 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2098 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2099 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[B:%.*]]
2100 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2101 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2102 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2103 // CHECK-NEXT: ret i16 [[TMP5]]
2105 mve_pred16_t
test_vcmpgtq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
2108 return vcmpgtq_m(a
, b
, p
);
2109 #else /* POLYMORPHIC */
2110 return vcmpgtq_m_s8(a
, b
, p
);
2111 #endif /* POLYMORPHIC */
2114 // CHECK-LABEL: @test_vcmpgtq_m_s16(
2115 // CHECK-NEXT: entry:
2116 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2117 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2118 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[B:%.*]]
2119 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2120 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2121 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2122 // CHECK-NEXT: ret i16 [[TMP5]]
2124 mve_pred16_t
test_vcmpgtq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
2127 return vcmpgtq_m(a
, b
, p
);
2128 #else /* POLYMORPHIC */
2129 return vcmpgtq_m_s16(a
, b
, p
);
2130 #endif /* POLYMORPHIC */
2133 // CHECK-LABEL: @test_vcmpgtq_m_s32(
2134 // CHECK-NEXT: entry:
2135 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2136 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2137 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[B:%.*]]
2138 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2139 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2140 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2141 // CHECK-NEXT: ret i16 [[TMP5]]
2143 mve_pred16_t
test_vcmpgtq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
2146 return vcmpgtq_m(a
, b
, p
);
2147 #else /* POLYMORPHIC */
2148 return vcmpgtq_m_s32(a
, b
, p
);
2149 #endif /* POLYMORPHIC */
2152 // CHECK-LABEL: @test_vcmphiq_m_u8(
2153 // CHECK-NEXT: entry:
2154 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2155 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2156 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[B:%.*]]
2157 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2158 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2159 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2160 // CHECK-NEXT: ret i16 [[TMP5]]
2162 mve_pred16_t
test_vcmphiq_m_u8(uint8x16_t a
, uint8x16_t b
, mve_pred16_t p
)
2165 return vcmphiq_m(a
, b
, p
);
2166 #else /* POLYMORPHIC */
2167 return vcmphiq_m_u8(a
, b
, p
);
2168 #endif /* POLYMORPHIC */
2171 // CHECK-LABEL: @test_vcmphiq_m_u16(
2172 // CHECK-NEXT: entry:
2173 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2174 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2175 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[B:%.*]]
2176 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2177 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2178 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2179 // CHECK-NEXT: ret i16 [[TMP5]]
2181 mve_pred16_t
test_vcmphiq_m_u16(uint16x8_t a
, uint16x8_t b
, mve_pred16_t p
)
2184 return vcmphiq_m(a
, b
, p
);
2185 #else /* POLYMORPHIC */
2186 return vcmphiq_m_u16(a
, b
, p
);
2187 #endif /* POLYMORPHIC */
2190 // CHECK-LABEL: @test_vcmphiq_m_u32(
2191 // CHECK-NEXT: entry:
2192 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2193 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2194 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[B:%.*]]
2195 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2196 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2197 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2198 // CHECK-NEXT: ret i16 [[TMP5]]
2200 mve_pred16_t
test_vcmphiq_m_u32(uint32x4_t a
, uint32x4_t b
, mve_pred16_t p
)
2203 return vcmphiq_m(a
, b
, p
);
2204 #else /* POLYMORPHIC */
2205 return vcmphiq_m_u32(a
, b
, p
);
2206 #endif /* POLYMORPHIC */
2209 // CHECK-LABEL: @test_vcmpgtq_m_n_f16(
2210 // CHECK-NEXT: entry:
2211 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2212 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2213 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
2214 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
2215 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <8 x half> [[A:%.*]], [[DOTSPLAT]]
2216 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2217 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2218 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2219 // CHECK-NEXT: ret i16 [[TMP5]]
2221 mve_pred16_t
test_vcmpgtq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
2224 return vcmpgtq_m(a
, b
, p
);
2225 #else /* POLYMORPHIC */
2226 return vcmpgtq_m_n_f16(a
, b
, p
);
2227 #endif /* POLYMORPHIC */
2230 // CHECK-LABEL: @test_vcmpgtq_m_n_f32(
2231 // CHECK-NEXT: entry:
2232 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2233 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2234 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
2235 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
2236 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[DOTSPLAT]]
2237 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2238 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2239 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2240 // CHECK-NEXT: ret i16 [[TMP5]]
2242 mve_pred16_t
test_vcmpgtq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
2245 return vcmpgtq_m(a
, b
, p
);
2246 #else /* POLYMORPHIC */
2247 return vcmpgtq_m_n_f32(a
, b
, p
);
2248 #endif /* POLYMORPHIC */
2251 // CHECK-LABEL: @test_vcmpgtq_m_n_s8(
2252 // CHECK-NEXT: entry:
2253 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2254 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2255 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2256 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2257 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2258 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2259 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2260 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2261 // CHECK-NEXT: ret i16 [[TMP5]]
2263 mve_pred16_t
test_vcmpgtq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
2266 return vcmpgtq_m(a
, b
, p
);
2267 #else /* POLYMORPHIC */
2268 return vcmpgtq_m_n_s8(a
, b
, p
);
2269 #endif /* POLYMORPHIC */
2272 // CHECK-LABEL: @test_vcmpgtq_m_n_s16(
2273 // CHECK-NEXT: entry:
2274 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2275 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2276 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2277 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2278 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2279 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2280 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2281 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2282 // CHECK-NEXT: ret i16 [[TMP5]]
2284 mve_pred16_t
test_vcmpgtq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
2287 return vcmpgtq_m(a
, b
, p
);
2288 #else /* POLYMORPHIC */
2289 return vcmpgtq_m_n_s16(a
, b
, p
);
2290 #endif /* POLYMORPHIC */
2293 // CHECK-LABEL: @test_vcmpgtq_m_n_s32(
2294 // CHECK-NEXT: entry:
2295 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2296 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2297 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2298 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2299 // CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2300 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2301 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2302 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2303 // CHECK-NEXT: ret i16 [[TMP5]]
2305 mve_pred16_t
test_vcmpgtq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
2308 return vcmpgtq_m(a
, b
, p
);
2309 #else /* POLYMORPHIC */
2310 return vcmpgtq_m_n_s32(a
, b
, p
);
2311 #endif /* POLYMORPHIC */
2314 // CHECK-LABEL: @test_vcmphiq_m_n_u8(
2315 // CHECK-NEXT: entry:
2316 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2317 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2318 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2319 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2320 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2321 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2322 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2323 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2324 // CHECK-NEXT: ret i16 [[TMP5]]
2326 mve_pred16_t
test_vcmphiq_m_n_u8(uint8x16_t a
, uint8_t b
, mve_pred16_t p
)
2329 return vcmphiq_m(a
, b
, p
);
2330 #else /* POLYMORPHIC */
2331 return vcmphiq_m_n_u8(a
, b
, p
);
2332 #endif /* POLYMORPHIC */
2335 // CHECK-LABEL: @test_vcmphiq_m_n_u16(
2336 // CHECK-NEXT: entry:
2337 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2338 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2339 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2340 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2341 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2342 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2343 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2344 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2345 // CHECK-NEXT: ret i16 [[TMP5]]
2347 mve_pred16_t
test_vcmphiq_m_n_u16(uint16x8_t a
, uint16_t b
, mve_pred16_t p
)
2350 return vcmphiq_m(a
, b
, p
);
2351 #else /* POLYMORPHIC */
2352 return vcmphiq_m_n_u16(a
, b
, p
);
2353 #endif /* POLYMORPHIC */
2356 // CHECK-LABEL: @test_vcmphiq_m_n_u32(
2357 // CHECK-NEXT: entry:
2358 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2359 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2360 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2361 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2362 // CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2363 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2364 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2365 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2366 // CHECK-NEXT: ret i16 [[TMP5]]
2368 mve_pred16_t
test_vcmphiq_m_n_u32(uint32x4_t a
, uint32_t b
, mve_pred16_t p
)
2371 return vcmphiq_m(a
, b
, p
);
2372 #else /* POLYMORPHIC */
2373 return vcmphiq_m_n_u32(a
, b
, p
);
2374 #endif /* POLYMORPHIC */
2377 // CHECK-LABEL: @test_vcmpleq_f16(
2378 // CHECK-NEXT: entry:
2379 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]]
2380 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2381 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2382 // CHECK-NEXT: ret i16 [[TMP2]]
2384 mve_pred16_t
test_vcmpleq_f16(float16x8_t a
, float16x8_t b
)
2387 return vcmpleq(a
, b
);
2388 #else /* POLYMORPHIC */
2389 return vcmpleq_f16(a
, b
);
2390 #endif /* POLYMORPHIC */
2393 // CHECK-LABEL: @test_vcmpleq_f32(
2394 // CHECK-NEXT: entry:
2395 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]]
2396 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2397 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2398 // CHECK-NEXT: ret i16 [[TMP2]]
2400 mve_pred16_t
test_vcmpleq_f32(float32x4_t a
, float32x4_t b
)
2403 return vcmpleq(a
, b
);
2404 #else /* POLYMORPHIC */
2405 return vcmpleq_f32(a
, b
);
2406 #endif /* POLYMORPHIC */
2409 // CHECK-LABEL: @test_vcmpleq_s8(
2410 // CHECK-NEXT: entry:
2411 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]]
2412 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
2413 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2414 // CHECK-NEXT: ret i16 [[TMP2]]
2416 mve_pred16_t
test_vcmpleq_s8(int8x16_t a
, int8x16_t b
)
2419 return vcmpleq(a
, b
);
2420 #else /* POLYMORPHIC */
2421 return vcmpleq_s8(a
, b
);
2422 #endif /* POLYMORPHIC */
2425 // CHECK-LABEL: @test_vcmpleq_s16(
2426 // CHECK-NEXT: entry:
2427 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]]
2428 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2429 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2430 // CHECK-NEXT: ret i16 [[TMP2]]
2432 mve_pred16_t
test_vcmpleq_s16(int16x8_t a
, int16x8_t b
)
2435 return vcmpleq(a
, b
);
2436 #else /* POLYMORPHIC */
2437 return vcmpleq_s16(a
, b
);
2438 #endif /* POLYMORPHIC */
2441 // CHECK-LABEL: @test_vcmpleq_s32(
2442 // CHECK-NEXT: entry:
2443 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]]
2444 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2445 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2446 // CHECK-NEXT: ret i16 [[TMP2]]
2448 mve_pred16_t
test_vcmpleq_s32(int32x4_t a
, int32x4_t b
)
2451 return vcmpleq(a
, b
);
2452 #else /* POLYMORPHIC */
2453 return vcmpleq_s32(a
, b
);
2454 #endif /* POLYMORPHIC */
2457 // CHECK-LABEL: @test_vcmpleq_n_f16(
2458 // CHECK-NEXT: entry:
2459 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
2460 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
2461 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[DOTSPLAT]]
2462 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2463 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2464 // CHECK-NEXT: ret i16 [[TMP2]]
2466 mve_pred16_t
test_vcmpleq_n_f16(float16x8_t a
, float16_t b
)
2469 return vcmpleq(a
, b
);
2470 #else /* POLYMORPHIC */
2471 return vcmpleq_n_f16(a
, b
);
2472 #endif /* POLYMORPHIC */
2475 // CHECK-LABEL: @test_vcmpleq_n_f32(
2476 // CHECK-NEXT: entry:
2477 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
2478 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
2479 // CHECK-NEXT: [[TMP0:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[DOTSPLAT]]
2480 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2481 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2482 // CHECK-NEXT: ret i16 [[TMP2]]
2484 mve_pred16_t
test_vcmpleq_n_f32(float32x4_t a
, float32_t b
)
2487 return vcmpleq(a
, b
);
2488 #else /* POLYMORPHIC */
2489 return vcmpleq_n_f32(a
, b
);
2490 #endif /* POLYMORPHIC */
2493 // CHECK-LABEL: @test_vcmpleq_n_s8(
2494 // CHECK-NEXT: entry:
2495 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2496 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2497 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2498 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
2499 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2500 // CHECK-NEXT: ret i16 [[TMP2]]
2502 mve_pred16_t
test_vcmpleq_n_s8(int8x16_t a
, int8_t b
)
2505 return vcmpleq(a
, b
);
2506 #else /* POLYMORPHIC */
2507 return vcmpleq_n_s8(a
, b
);
2508 #endif /* POLYMORPHIC */
2511 // CHECK-LABEL: @test_vcmpleq_n_s16(
2512 // CHECK-NEXT: entry:
2513 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2514 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2515 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2516 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2517 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2518 // CHECK-NEXT: ret i16 [[TMP2]]
2520 mve_pred16_t
test_vcmpleq_n_s16(int16x8_t a
, int16_t b
)
2523 return vcmpleq(a
, b
);
2524 #else /* POLYMORPHIC */
2525 return vcmpleq_n_s16(a
, b
);
2526 #endif /* POLYMORPHIC */
2529 // CHECK-LABEL: @test_vcmpleq_n_s32(
2530 // CHECK-NEXT: entry:
2531 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2532 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2533 // CHECK-NEXT: [[TMP0:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2534 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2535 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2536 // CHECK-NEXT: ret i16 [[TMP2]]
2538 mve_pred16_t
test_vcmpleq_n_s32(int32x4_t a
, int32_t b
)
2541 return vcmpleq(a
, b
);
2542 #else /* POLYMORPHIC */
2543 return vcmpleq_n_s32(a
, b
);
2544 #endif /* POLYMORPHIC */
2547 // CHECK-LABEL: @test_vcmpleq_m_f16(
2548 // CHECK-NEXT: entry:
2549 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2550 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2551 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[B:%.*]]
2552 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2553 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2554 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2555 // CHECK-NEXT: ret i16 [[TMP5]]
2557 mve_pred16_t
test_vcmpleq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
2560 return vcmpleq_m(a
, b
, p
);
2561 #else /* POLYMORPHIC */
2562 return vcmpleq_m_f16(a
, b
, p
);
2563 #endif /* POLYMORPHIC */
2566 // CHECK-LABEL: @test_vcmpleq_m_f32(
2567 // CHECK-NEXT: entry:
2568 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2569 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2570 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[B:%.*]]
2571 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2572 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2573 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2574 // CHECK-NEXT: ret i16 [[TMP5]]
2576 mve_pred16_t
test_vcmpleq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
2579 return vcmpleq_m(a
, b
, p
);
2580 #else /* POLYMORPHIC */
2581 return vcmpleq_m_f32(a
, b
, p
);
2582 #endif /* POLYMORPHIC */
2585 // CHECK-LABEL: @test_vcmpleq_m_s8(
2586 // CHECK-NEXT: entry:
2587 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2588 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2589 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[B:%.*]]
2590 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2591 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2592 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2593 // CHECK-NEXT: ret i16 [[TMP5]]
2595 mve_pred16_t
test_vcmpleq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
2598 return vcmpleq_m(a
, b
, p
);
2599 #else /* POLYMORPHIC */
2600 return vcmpleq_m_s8(a
, b
, p
);
2601 #endif /* POLYMORPHIC */
2604 // CHECK-LABEL: @test_vcmpleq_m_s16(
2605 // CHECK-NEXT: entry:
2606 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2607 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2608 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[B:%.*]]
2609 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2610 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2611 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2612 // CHECK-NEXT: ret i16 [[TMP5]]
2614 mve_pred16_t
test_vcmpleq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
2617 return vcmpleq_m(a
, b
, p
);
2618 #else /* POLYMORPHIC */
2619 return vcmpleq_m_s16(a
, b
, p
);
2620 #endif /* POLYMORPHIC */
2623 // CHECK-LABEL: @test_vcmpleq_m_s32(
2624 // CHECK-NEXT: entry:
2625 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2626 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2627 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[B:%.*]]
2628 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2629 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2630 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2631 // CHECK-NEXT: ret i16 [[TMP5]]
2633 mve_pred16_t
test_vcmpleq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
2636 return vcmpleq_m(a
, b
, p
);
2637 #else /* POLYMORPHIC */
2638 return vcmpleq_m_s32(a
, b
, p
);
2639 #endif /* POLYMORPHIC */
2642 // CHECK-LABEL: @test_vcmpleq_m_n_f16(
2643 // CHECK-NEXT: entry:
2644 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2645 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2646 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
2647 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
2648 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <8 x half> [[A:%.*]], [[DOTSPLAT]]
2649 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2650 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2651 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2652 // CHECK-NEXT: ret i16 [[TMP5]]
2654 mve_pred16_t
test_vcmpleq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
2657 return vcmpleq_m(a
, b
, p
);
2658 #else /* POLYMORPHIC */
2659 return vcmpleq_m_n_f16(a
, b
, p
);
2660 #endif /* POLYMORPHIC */
2663 // CHECK-LABEL: @test_vcmpleq_m_n_f32(
2664 // CHECK-NEXT: entry:
2665 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2666 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2667 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
2668 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
2669 // CHECK-NEXT: [[TMP2:%.*]] = fcmp ole <4 x float> [[A:%.*]], [[DOTSPLAT]]
2670 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2671 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2672 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2673 // CHECK-NEXT: ret i16 [[TMP5]]
2675 mve_pred16_t
test_vcmpleq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
2678 return vcmpleq_m(a
, b
, p
);
2679 #else /* POLYMORPHIC */
2680 return vcmpleq_m_n_f32(a
, b
, p
);
2681 #endif /* POLYMORPHIC */
2684 // CHECK-LABEL: @test_vcmpleq_m_n_s8(
2685 // CHECK-NEXT: entry:
2686 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2687 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2688 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2689 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2690 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2691 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2692 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2693 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2694 // CHECK-NEXT: ret i16 [[TMP5]]
2696 mve_pred16_t
test_vcmpleq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
2699 return vcmpleq_m(a
, b
, p
);
2700 #else /* POLYMORPHIC */
2701 return vcmpleq_m_n_s8(a
, b
, p
);
2702 #endif /* POLYMORPHIC */
2705 // CHECK-LABEL: @test_vcmpleq_m_n_s16(
2706 // CHECK-NEXT: entry:
2707 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2708 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2709 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2710 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2711 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2712 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2713 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2714 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2715 // CHECK-NEXT: ret i16 [[TMP5]]
2717 mve_pred16_t
test_vcmpleq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
2720 return vcmpleq_m(a
, b
, p
);
2721 #else /* POLYMORPHIC */
2722 return vcmpleq_m_n_s16(a
, b
, p
);
2723 #endif /* POLYMORPHIC */
2726 // CHECK-LABEL: @test_vcmpleq_m_n_s32(
2727 // CHECK-NEXT: entry:
2728 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2729 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2730 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2731 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2732 // CHECK-NEXT: [[TMP2:%.*]] = icmp sle <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2733 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2734 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2735 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2736 // CHECK-NEXT: ret i16 [[TMP5]]
2738 mve_pred16_t
test_vcmpleq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
2741 return vcmpleq_m(a
, b
, p
);
2742 #else /* POLYMORPHIC */
2743 return vcmpleq_m_n_s32(a
, b
, p
);
2744 #endif /* POLYMORPHIC */
2747 // CHECK-LABEL: @test_vcmpltq_f16(
2748 // CHECK-NEXT: entry:
2749 // CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]]
2750 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2751 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2752 // CHECK-NEXT: ret i16 [[TMP2]]
2754 mve_pred16_t
test_vcmpltq_f16(float16x8_t a
, float16x8_t b
)
2757 return vcmpltq(a
, b
);
2758 #else /* POLYMORPHIC */
2759 return vcmpltq_f16(a
, b
);
2760 #endif /* POLYMORPHIC */
2763 // CHECK-LABEL: @test_vcmpltq_f32(
2764 // CHECK-NEXT: entry:
2765 // CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
2766 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2767 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2768 // CHECK-NEXT: ret i16 [[TMP2]]
2770 mve_pred16_t
test_vcmpltq_f32(float32x4_t a
, float32x4_t b
)
2773 return vcmpltq(a
, b
);
2774 #else /* POLYMORPHIC */
2775 return vcmpltq_f32(a
, b
);
2776 #endif /* POLYMORPHIC */
2779 // CHECK-LABEL: @test_vcmpltq_s8(
2780 // CHECK-NEXT: entry:
2781 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]]
2782 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
2783 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2784 // CHECK-NEXT: ret i16 [[TMP2]]
2786 mve_pred16_t
test_vcmpltq_s8(int8x16_t a
, int8x16_t b
)
2789 return vcmpltq(a
, b
);
2790 #else /* POLYMORPHIC */
2791 return vcmpltq_s8(a
, b
);
2792 #endif /* POLYMORPHIC */
2795 // CHECK-LABEL: @test_vcmpltq_s16(
2796 // CHECK-NEXT: entry:
2797 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]]
2798 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2799 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2800 // CHECK-NEXT: ret i16 [[TMP2]]
2802 mve_pred16_t
test_vcmpltq_s16(int16x8_t a
, int16x8_t b
)
2805 return vcmpltq(a
, b
);
2806 #else /* POLYMORPHIC */
2807 return vcmpltq_s16(a
, b
);
2808 #endif /* POLYMORPHIC */
2811 // CHECK-LABEL: @test_vcmpltq_s32(
2812 // CHECK-NEXT: entry:
2813 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]]
2814 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2815 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2816 // CHECK-NEXT: ret i16 [[TMP2]]
2818 mve_pred16_t
test_vcmpltq_s32(int32x4_t a
, int32x4_t b
)
2821 return vcmpltq(a
, b
);
2822 #else /* POLYMORPHIC */
2823 return vcmpltq_s32(a
, b
);
2824 #endif /* POLYMORPHIC */
2827 // CHECK-LABEL: @test_vcmpltq_n_f16(
2828 // CHECK-NEXT: entry:
2829 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
2830 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
2831 // CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[DOTSPLAT]]
2832 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2833 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2834 // CHECK-NEXT: ret i16 [[TMP2]]
2836 mve_pred16_t
test_vcmpltq_n_f16(float16x8_t a
, float16_t b
)
2839 return vcmpltq(a
, b
);
2840 #else /* POLYMORPHIC */
2841 return vcmpltq_n_f16(a
, b
);
2842 #endif /* POLYMORPHIC */
2845 // CHECK-LABEL: @test_vcmpltq_n_f32(
2846 // CHECK-NEXT: entry:
2847 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
2848 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
2849 // CHECK-NEXT: [[TMP0:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[DOTSPLAT]]
2850 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2851 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2852 // CHECK-NEXT: ret i16 [[TMP2]]
2854 mve_pred16_t
test_vcmpltq_n_f32(float32x4_t a
, float32_t b
)
2857 return vcmpltq(a
, b
);
2858 #else /* POLYMORPHIC */
2859 return vcmpltq_n_f32(a
, b
);
2860 #endif /* POLYMORPHIC */
2863 // CHECK-LABEL: @test_vcmpltq_n_s8(
2864 // CHECK-NEXT: entry:
2865 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
2866 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
2867 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
2868 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP0]])
2869 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2870 // CHECK-NEXT: ret i16 [[TMP2]]
2872 mve_pred16_t
test_vcmpltq_n_s8(int8x16_t a
, int8_t b
)
2875 return vcmpltq(a
, b
);
2876 #else /* POLYMORPHIC */
2877 return vcmpltq_n_s8(a
, b
);
2878 #endif /* POLYMORPHIC */
2881 // CHECK-LABEL: @test_vcmpltq_n_s16(
2882 // CHECK-NEXT: entry:
2883 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
2884 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
2885 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
2886 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP0]])
2887 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2888 // CHECK-NEXT: ret i16 [[TMP2]]
2890 mve_pred16_t
test_vcmpltq_n_s16(int16x8_t a
, int16_t b
)
2893 return vcmpltq(a
, b
);
2894 #else /* POLYMORPHIC */
2895 return vcmpltq_n_s16(a
, b
);
2896 #endif /* POLYMORPHIC */
2899 // CHECK-LABEL: @test_vcmpltq_n_s32(
2900 // CHECK-NEXT: entry:
2901 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
2902 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
2903 // CHECK-NEXT: [[TMP0:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
2904 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP0]])
2905 // CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i16
2906 // CHECK-NEXT: ret i16 [[TMP2]]
2908 mve_pred16_t
test_vcmpltq_n_s32(int32x4_t a
, int32_t b
)
2911 return vcmpltq(a
, b
);
2912 #else /* POLYMORPHIC */
2913 return vcmpltq_n_s32(a
, b
);
2914 #endif /* POLYMORPHIC */
2917 // CHECK-LABEL: @test_vcmpltq_m_f16(
2918 // CHECK-NEXT: entry:
2919 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2920 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2921 // CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[B:%.*]]
2922 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2923 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2924 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2925 // CHECK-NEXT: ret i16 [[TMP5]]
2927 mve_pred16_t
test_vcmpltq_m_f16(float16x8_t a
, float16x8_t b
, mve_pred16_t p
)
2930 return vcmpltq_m(a
, b
, p
);
2931 #else /* POLYMORPHIC */
2932 return vcmpltq_m_f16(a
, b
, p
);
2933 #endif /* POLYMORPHIC */
2936 // CHECK-LABEL: @test_vcmpltq_m_f32(
2937 // CHECK-NEXT: entry:
2938 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2939 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2940 // CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[B:%.*]]
2941 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2942 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
2943 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2944 // CHECK-NEXT: ret i16 [[TMP5]]
2946 mve_pred16_t
test_vcmpltq_m_f32(float32x4_t a
, float32x4_t b
, mve_pred16_t p
)
2949 return vcmpltq_m(a
, b
, p
);
2950 #else /* POLYMORPHIC */
2951 return vcmpltq_m_f32(a
, b
, p
);
2952 #endif /* POLYMORPHIC */
2955 // CHECK-LABEL: @test_vcmpltq_m_s8(
2956 // CHECK-NEXT: entry:
2957 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2958 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
2959 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[B:%.*]]
2960 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
2961 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
2962 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2963 // CHECK-NEXT: ret i16 [[TMP5]]
2965 mve_pred16_t
test_vcmpltq_m_s8(int8x16_t a
, int8x16_t b
, mve_pred16_t p
)
2968 return vcmpltq_m(a
, b
, p
);
2969 #else /* POLYMORPHIC */
2970 return vcmpltq_m_s8(a
, b
, p
);
2971 #endif /* POLYMORPHIC */
2974 // CHECK-LABEL: @test_vcmpltq_m_s16(
2975 // CHECK-NEXT: entry:
2976 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2977 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
2978 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[B:%.*]]
2979 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
2980 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
2981 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
2982 // CHECK-NEXT: ret i16 [[TMP5]]
2984 mve_pred16_t
test_vcmpltq_m_s16(int16x8_t a
, int16x8_t b
, mve_pred16_t p
)
2987 return vcmpltq_m(a
, b
, p
);
2988 #else /* POLYMORPHIC */
2989 return vcmpltq_m_s16(a
, b
, p
);
2990 #endif /* POLYMORPHIC */
2993 // CHECK-LABEL: @test_vcmpltq_m_s32(
2994 // CHECK-NEXT: entry:
2995 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
2996 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
2997 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[B:%.*]]
2998 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
2999 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
3000 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3001 // CHECK-NEXT: ret i16 [[TMP5]]
3003 mve_pred16_t
test_vcmpltq_m_s32(int32x4_t a
, int32x4_t b
, mve_pred16_t p
)
3006 return vcmpltq_m(a
, b
, p
);
3007 #else /* POLYMORPHIC */
3008 return vcmpltq_m_s32(a
, b
, p
);
3009 #endif /* POLYMORPHIC */
3012 // CHECK-LABEL: @test_vcmpltq_m_n_f16(
3013 // CHECK-NEXT: entry:
3014 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
3015 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
3016 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x half> poison, half [[B:%.*]], i64 0
3017 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x half> [[DOTSPLATINSERT]], <8 x half> poison, <8 x i32> zeroinitializer
3018 // CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <8 x half> [[A:%.*]], [[DOTSPLAT]]
3019 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
3020 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
3021 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3022 // CHECK-NEXT: ret i16 [[TMP5]]
3024 mve_pred16_t
test_vcmpltq_m_n_f16(float16x8_t a
, float16_t b
, mve_pred16_t p
)
3027 return vcmpltq_m(a
, b
, p
);
3028 #else /* POLYMORPHIC */
3029 return vcmpltq_m_n_f16(a
, b
, p
);
3030 #endif /* POLYMORPHIC */
3033 // CHECK-LABEL: @test_vcmpltq_m_n_f32(
3034 // CHECK-NEXT: entry:
3035 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
3036 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
3037 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[B:%.*]], i64 0
3038 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
3039 // CHECK-NEXT: [[TMP2:%.*]] = fcmp olt <4 x float> [[A:%.*]], [[DOTSPLAT]]
3040 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
3041 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
3042 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3043 // CHECK-NEXT: ret i16 [[TMP5]]
3045 mve_pred16_t
test_vcmpltq_m_n_f32(float32x4_t a
, float32_t b
, mve_pred16_t p
)
3048 return vcmpltq_m(a
, b
, p
);
3049 #else /* POLYMORPHIC */
3050 return vcmpltq_m_n_f32(a
, b
, p
);
3051 #endif /* POLYMORPHIC */
3054 // CHECK-LABEL: @test_vcmpltq_m_n_s8(
3055 // CHECK-NEXT: entry:
3056 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
3057 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
3058 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[B:%.*]], i64 0
3059 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <16 x i8> [[DOTSPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
3060 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <16 x i8> [[A:%.*]], [[DOTSPLAT]]
3061 // CHECK-NEXT: [[TMP3:%.*]] = and <16 x i1> [[TMP1]], [[TMP2]]
3062 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> [[TMP3]])
3063 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3064 // CHECK-NEXT: ret i16 [[TMP5]]
3066 mve_pred16_t
test_vcmpltq_m_n_s8(int8x16_t a
, int8_t b
, mve_pred16_t p
)
3069 return vcmpltq_m(a
, b
, p
);
3070 #else /* POLYMORPHIC */
3071 return vcmpltq_m_n_s8(a
, b
, p
);
3072 #endif /* POLYMORPHIC */
3075 // CHECK-LABEL: @test_vcmpltq_m_n_s16(
3076 // CHECK-NEXT: entry:
3077 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
3078 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
3079 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B:%.*]], i64 0
3080 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <8 x i16> [[DOTSPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
3081 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <8 x i16> [[A:%.*]], [[DOTSPLAT]]
3082 // CHECK-NEXT: [[TMP3:%.*]] = and <8 x i1> [[TMP1]], [[TMP2]]
3083 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> [[TMP3]])
3084 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3085 // CHECK-NEXT: ret i16 [[TMP5]]
3087 mve_pred16_t
test_vcmpltq_m_n_s16(int16x8_t a
, int16_t b
, mve_pred16_t p
)
3090 return vcmpltq_m(a
, b
, p
);
3091 #else /* POLYMORPHIC */
3092 return vcmpltq_m_n_s16(a
, b
, p
);
3093 #endif /* POLYMORPHIC */
3096 // CHECK-LABEL: @test_vcmpltq_m_n_s32(
3097 // CHECK-NEXT: entry:
3098 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
3099 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
3100 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[B:%.*]], i64 0
3101 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
3102 // CHECK-NEXT: [[TMP2:%.*]] = icmp slt <4 x i32> [[A:%.*]], [[DOTSPLAT]]
3103 // CHECK-NEXT: [[TMP3:%.*]] = and <4 x i1> [[TMP1]], [[TMP2]]
3104 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> [[TMP3]])
3105 // CHECK-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i16
3106 // CHECK-NEXT: ret i16 [[TMP5]]
3108 mve_pred16_t
test_vcmpltq_m_n_s32(int32x4_t a
, int32_t b
, mve_pred16_t p
)
3111 return vcmpltq_m(a
, b
, p
);
3112 #else /* POLYMORPHIC */
3113 return vcmpltq_m_n_s32(a
, b
, p
);
3114 #endif /* POLYMORPHIC */