// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -passes='mem2reg,sroa,early-cse<>' | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>

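// Note on the expected lowering (inferred from the CHECK lines in this file):
// the unpredicated rounding intrinsics lower to the generic llvm.round /
// llvm.floor / llvm.ceil / llvm.trunc / llvm.rint intrinsics (vrndnq uses the
// target-specific llvm.arm.mve.vrintn), while the predicated _m and _x forms
// lower to the llvm.arm.mve.vrint*.predicated intrinsics.
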
// CHECK-LABEL: @test_vrndaq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.round.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndaq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndaq(a);
#else /* POLYMORPHIC */
    return vrndaq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndaq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndaq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndaq(a);
#else /* POLYMORPHIC */
    return vrndaq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.floor.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndmq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndmq(a);
#else /* POLYMORPHIC */
    return vrndmq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.floor.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndmq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndmq(a);
#else /* POLYMORPHIC */
    return vrndmq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.ceil.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndpq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndpq(a);
#else /* POLYMORPHIC */
    return vrndpq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.ceil.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndpq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndpq(a);
#else /* POLYMORPHIC */
    return vrndpq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.trunc.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndq(a);
#else /* POLYMORPHIC */
    return vrndq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.trunc.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndq(a);
#else /* POLYMORPHIC */
    return vrndq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.rint.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndxq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndxq(a);
#else /* POLYMORPHIC */
    return vrndxq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.rint.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndxq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndxq(a);
#else /* POLYMORPHIC */
    return vrndxq_f32(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vrintn.v8f16(<8 x half> [[A:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP0]]
//
float16x8_t test_vrndnq_f16(float16x8_t a)
{
#ifdef POLYMORPHIC
    return vrndnq(a);
#else /* POLYMORPHIC */
    return vrndnq_f16(a);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vrintn.v4f32(<4 x float> [[A:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP0]]
//
float32x4_t test_vrndnq_f32(float32x4_t a)
{
#ifdef POLYMORPHIC
    return vrndnq(a);
#else /* POLYMORPHIC */
    return vrndnq_f32(a);
#endif /* POLYMORPHIC */
}

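// Predicated _m ("merge") forms: lanes with a false predicate bit take their
// values from the separate 'inactive' operand, which the CHECK lines below
// show being passed straight through to the predicated IR intrinsic.
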
// CHECK-LABEL: @test_vrndaq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndaq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndaq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndaq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrinta.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndaq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndaq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndaq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintm.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndmq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndmq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintm.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndmq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndmq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndmq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintn.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndnq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndnq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintn.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndnq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndnq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndnq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintp.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndpq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndpq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintp.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndpq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndpq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndpq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintz.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintz.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintx.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndxq_m_f16(float16x8_t inactive, float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndxq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndxq_m_f16(inactive, a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_m_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintx.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndxq_m_f32(float32x4_t inactive, float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndxq_m(inactive, a, p);
#else /* POLYMORPHIC */
    return vrndxq_m_f32(inactive, a, p);
#endif /* POLYMORPHIC */
}

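// Predicated _x ("don't care") forms: lanes with a false predicate bit are
// left undefined, so the inactive operand of the predicated IR intrinsic is
// undef rather than a copy of an input vector.
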
// CHECK-LABEL: @test_vrndaq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrinta.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndaq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndaq_x(a, p);
#else /* POLYMORPHIC */
    return vrndaq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndaq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrinta.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndaq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndaq_x(a, p);
#else /* POLYMORPHIC */
    return vrndaq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintm.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndmq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndmq_x(a, p);
#else /* POLYMORPHIC */
    return vrndmq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndmq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintm.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndmq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndmq_x(a, p);
#else /* POLYMORPHIC */
    return vrndmq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintn.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndnq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndnq_x(a, p);
#else /* POLYMORPHIC */
    return vrndnq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndnq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintn.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndnq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndnq_x(a, p);
#else /* POLYMORPHIC */
    return vrndnq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintp.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndpq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndpq_x(a, p);
#else /* POLYMORPHIC */
    return vrndpq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndpq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintp.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndpq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndpq_x(a, p);
#else /* POLYMORPHIC */
    return vrndpq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintz.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndq_x(a, p);
#else /* POLYMORPHIC */
    return vrndq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintz.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndq_x(a, p);
#else /* POLYMORPHIC */
    return vrndq_x_f32(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_x_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vrintx.predicated.v8f16.v8i1(<8 x half> [[A:%.*]], <8 x i1> [[TMP1]], <8 x half> undef)
// CHECK-NEXT:    ret <8 x half> [[TMP2]]
//
float16x8_t test_vrndxq_x_f16(float16x8_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndxq_x(a, p);
#else /* POLYMORPHIC */
    return vrndxq_x_f16(a, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vrndxq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vrintx.predicated.v4f32.v4i1(<4 x float> [[A:%.*]], <4 x i1> [[TMP1]], <4 x float> undef)
// CHECK-NEXT:    ret <4 x float> [[TMP2]]
//
float32x4_t test_vrndxq_x_f32(float32x4_t a, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vrndxq_x(a, p);
#else /* POLYMORPHIC */
    return vrndxq_x_f32(a, p);
#endif /* POLYMORPHIC */
}