[LLVM][IR] Use splat syntax when printing ConstantExpr based splats. (#116856)
[llvm-project.git] / clang / test / CodeGen / arm-mve-intrinsics / vcmulq.c
blobd648f7c55749cd38056a181a0fd791e817bfcac9
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vcmulq_f16(
10 // CHECK-NEXT: entry:
11 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 0, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
12 // CHECK-NEXT: ret <8 x half> [[TMP0]]
14 float16x8_t test_vcmulq_f16(float16x8_t a, float16x8_t b)
16 #ifdef POLYMORPHIC
17 return vcmulq(a, b);
18 #else
19 return vcmulq_f16(a, b);
20 #endif
23 // CHECK-LABEL: @test_vcmulq_f32(
24 // CHECK-NEXT: entry:
25 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
26 // CHECK-NEXT: ret <4 x float> [[TMP0]]
28 float32x4_t test_vcmulq_f32(float32x4_t a, float32x4_t b)
30 #ifdef POLYMORPHIC
31 return vcmulq(a, b);
32 #else
33 return vcmulq_f32(a, b);
34 #endif
37 // CHECK-LABEL: @test_vcmulq_rot90_f16(
38 // CHECK-NEXT: entry:
39 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 1, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
40 // CHECK-NEXT: ret <8 x half> [[TMP0]]
42 float16x8_t test_vcmulq_rot90_f16(float16x8_t a, float16x8_t b)
44 #ifdef POLYMORPHIC
45 return vcmulq_rot90(a, b);
46 #else
47 return vcmulq_rot90_f16(a, b);
48 #endif
51 // CHECK-LABEL: @test_vcmulq_rot90_f32(
52 // CHECK-NEXT: entry:
53 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
54 // CHECK-NEXT: ret <4 x float> [[TMP0]]
56 float32x4_t test_vcmulq_rot90_f32(float32x4_t a, float32x4_t b)
58 #ifdef POLYMORPHIC
59 return vcmulq_rot90(a, b);
60 #else
61 return vcmulq_rot90_f32(a, b);
62 #endif
65 // CHECK-LABEL: @test_vcmulq_rot180_f16(
66 // CHECK-NEXT: entry:
67 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
68 // CHECK-NEXT: ret <8 x half> [[TMP0]]
70 float16x8_t test_vcmulq_rot180_f16(float16x8_t a, float16x8_t b)
72 #ifdef POLYMORPHIC
73 return vcmulq_rot180(a, b);
74 #else
75 return vcmulq_rot180_f16(a, b);
76 #endif
79 // CHECK-LABEL: @test_vcmulq_rot180_f32(
80 // CHECK-NEXT: entry:
81 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 2, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
82 // CHECK-NEXT: ret <4 x float> [[TMP0]]
84 float32x4_t test_vcmulq_rot180_f32(float32x4_t a, float32x4_t b)
86 #ifdef POLYMORPHIC
87 return vcmulq_rot180(a, b);
88 #else
89 return vcmulq_rot180_f32(a, b);
90 #endif
93 // CHECK-LABEL: @test_vcmulq_rot270_f16(
94 // CHECK-NEXT: entry:
95 // CHECK-NEXT: [[TMP0:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]])
96 // CHECK-NEXT: ret <8 x half> [[TMP0]]
98 float16x8_t test_vcmulq_rot270_f16(float16x8_t a, float16x8_t b)
100 #ifdef POLYMORPHIC
101 return vcmulq_rot270(a, b);
102 #else
103 return vcmulq_rot270_f16(a, b);
104 #endif
107 // CHECK-LABEL: @test_vcmulq_rot270_f32(
108 // CHECK-NEXT: entry:
109 // CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 3, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]])
110 // CHECK-NEXT: ret <4 x float> [[TMP0]]
112 float32x4_t test_vcmulq_rot270_f32(float32x4_t a, float32x4_t b)
114 #ifdef POLYMORPHIC
115 return vcmulq_rot270(a, b);
116 #else
117 return vcmulq_rot270_f32(a, b);
118 #endif
121 // CHECK-LABEL: @test_vcmulq_m_f16(
122 // CHECK-NEXT: entry:
123 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
124 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
125 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
126 // CHECK-NEXT: ret <8 x half> [[TMP2]]
128 float16x8_t test_vcmulq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
130 #ifdef polymorphic
131 return vcmulq_m(inactive, a, b, p);
132 #else
133 return vcmulq_m_f16(inactive, a, b, p);
134 #endif
137 // CHECK-LABEL: @test_vcmulq_m_f32(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
140 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
141 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
142 // CHECK-NEXT: ret <4 x float> [[TMP2]]
144 float32x4_t test_vcmulq_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
146 #ifdef polymorphic
147 return vcmulq_m(inactive, a, b, p);
148 #else
149 return vcmulq_m_f32(inactive, a, b, p);
150 #endif
153 // CHECK-LABEL: @test_vcmulq_rot90_m_f16(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
156 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
157 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
158 // CHECK-NEXT: ret <8 x half> [[TMP2]]
160 float16x8_t test_vcmulq_rot90_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
162 #ifdef polymorphic
163 return vcmulq_rot90_m(inactive, a, b, p);
164 #else
165 return vcmulq_rot90_m_f16(inactive, a, b, p);
166 #endif
169 // CHECK-LABEL: @test_vcmulq_rot90_m_f32(
170 // CHECK-NEXT: entry:
171 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
172 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
173 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
174 // CHECK-NEXT: ret <4 x float> [[TMP2]]
176 float32x4_t test_vcmulq_rot90_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
178 #ifdef polymorphic
179 return vcmulq_rot90_m(inactive, a, b, p);
180 #else
181 return vcmulq_rot90_m_f32(inactive, a, b, p);
182 #endif
185 // CHECK-LABEL: @test_vcmulq_rot180_m_f16(
186 // CHECK-NEXT: entry:
187 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
188 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
189 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
190 // CHECK-NEXT: ret <8 x half> [[TMP2]]
192 float16x8_t test_vcmulq_rot180_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
194 #ifdef polymorphic
195 return vcmulq_rot180_m(inactive, a, b, p);
196 #else
197 return vcmulq_rot180_m_f16(inactive, a, b, p);
198 #endif
201 // CHECK-LABEL: @test_vcmulq_rot180_m_f32(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
204 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
205 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
206 // CHECK-NEXT: ret <4 x float> [[TMP2]]
208 float32x4_t test_vcmulq_rot180_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
210 #ifdef polymorphic
211 return vcmulq_rot180_m(inactive, a, b, p);
212 #else
213 return vcmulq_rot180_m_f32(inactive, a, b, p);
214 #endif
217 // CHECK-LABEL: @test_vcmulq_rot270_m_f16(
218 // CHECK-NEXT: entry:
219 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
220 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
221 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> [[INACTIVE:%.*]], <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
222 // CHECK-NEXT: ret <8 x half> [[TMP2]]
224 float16x8_t test_vcmulq_rot270_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
226 #ifdef polymorphic
227 return vcmulq_rot270_m(inactive, a, b, p);
228 #else
229 return vcmulq_rot270_m_f16(inactive, a, b, p);
230 #endif
233 // CHECK-LABEL: @test_vcmulq_rot270_m_f32(
234 // CHECK-NEXT: entry:
235 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
236 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
237 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> [[INACTIVE:%.*]], <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
238 // CHECK-NEXT: ret <4 x float> [[TMP2]]
240 float32x4_t test_vcmulq_rot270_m_f32(float32x4_t inactive, float32x4_t a, float32x4_t b, mve_pred16_t p)
242 #ifdef polymorphic
243 return vcmulq_rot270_m(inactive, a, b, p);
244 #else
245 return vcmulq_rot270_m_f32(inactive, a, b, p);
246 #endif
249 // CHECK-LABEL: @test_vcmulq_x_f16(
250 // CHECK-NEXT: entry:
251 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
252 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
253 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 0, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
254 // CHECK-NEXT: ret <8 x half> [[TMP2]]
256 float16x8_t test_vcmulq_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
258 #ifdef POLYMORPHIC
259 return vcmulq_x(a, b, p);
260 #else
261 return vcmulq_x_f16(a, b, p);
262 #endif
265 // CHECK-LABEL: @test_vcmulq_x_f32(
266 // CHECK-NEXT: entry:
267 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
268 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
269 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 0, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
270 // CHECK-NEXT: ret <4 x float> [[TMP2]]
272 float32x4_t test_vcmulq_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
274 #ifdef POLYMORPHIC
275 return vcmulq_x(a, b, p);
276 #else
277 return vcmulq_x_f32(a, b, p);
278 #endif
281 // CHECK-LABEL: @test_vcmulq_rot90_x_f16(
282 // CHECK-NEXT: entry:
283 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
284 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
285 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 1, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
286 // CHECK-NEXT: ret <8 x half> [[TMP2]]
288 float16x8_t test_vcmulq_rot90_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
290 #ifdef POLYMORPHIC
291 return vcmulq_rot90_x(a, b, p);
292 #else
293 return vcmulq_rot90_x_f16(a, b, p);
294 #endif
297 // CHECK-LABEL: @test_vcmulq_rot90_x_f32(
298 // CHECK-NEXT: entry:
299 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
300 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
301 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 1, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
302 // CHECK-NEXT: ret <4 x float> [[TMP2]]
304 float32x4_t test_vcmulq_rot90_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
306 #ifdef POLYMORPHIC
307 return vcmulq_rot90_x(a, b, p);
308 #else
309 return vcmulq_rot90_x_f32(a, b, p);
310 #endif
313 // CHECK-LABEL: @test_vcmulq_rot180_x_f16(
314 // CHECK-NEXT: entry:
315 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
316 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
317 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 2, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
318 // CHECK-NEXT: ret <8 x half> [[TMP2]]
320 float16x8_t test_vcmulq_rot180_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
322 #ifdef POLYMORPHIC
323 return vcmulq_rot180_x(a, b, p);
324 #else
325 return vcmulq_rot180_x_f16(a, b, p);
326 #endif
329 // CHECK-LABEL: @test_vcmulq_rot180_x_f32(
330 // CHECK-NEXT: entry:
331 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
332 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
333 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 2, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
334 // CHECK-NEXT: ret <4 x float> [[TMP2]]
336 float32x4_t test_vcmulq_rot180_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
338 #ifdef POLYMORPHIC
339 return vcmulq_rot180_x(a, b, p);
340 #else
341 return vcmulq_rot180_x_f32(a, b, p);
342 #endif
345 // CHECK-LABEL: @test_vcmulq_rot270_x_f16(
346 // CHECK-NEXT: entry:
347 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
348 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
349 // CHECK-NEXT: [[TMP2:%.*]] = call <8 x half> @llvm.arm.mve.vcmulq.predicated.v8f16.v8i1(i32 3, <8 x half> undef, <8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x i1> [[TMP1]])
350 // CHECK-NEXT: ret <8 x half> [[TMP2]]
352 float16x8_t test_vcmulq_rot270_x_f16(float16x8_t a, float16x8_t b, mve_pred16_t p)
354 #ifdef POLYMORPHIC
355 return vcmulq_rot270_x(a, b, p);
356 #else
357 return vcmulq_rot270_x_f16(a, b, p);
358 #endif
361 // CHECK-LABEL: @test_vcmulq_rot270_x_f32(
362 // CHECK-NEXT: entry:
363 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
364 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
365 // CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.arm.mve.vcmulq.predicated.v4f32.v4i1(i32 3, <4 x float> undef, <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i1> [[TMP1]])
366 // CHECK-NEXT: ret <4 x float> [[TMP2]]
368 float32x4_t test_vcmulq_rot270_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
370 #ifdef POLYMORPHIC
371 return vcmulq_rot270_x(a, b, p);
372 #else
373 return vcmulq_rot270_x_f32(a, b, p);
374 #endif