// Provenance (gitweb page header, kept as a comment):
//   commit subject: Run DCE after a LoopFlatten test to reduce spurious output [nfc]
//   [llvm-project.git] / clang / test / CodeGen / arm-mve-intrinsics / vmldav.c
//   blob 1713e04c9920625d0d79530877da83e20ad9e101
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -passes=mem2reg,sroa | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
9 // CHECK-LABEL: @test_vmladavaq_s8(
10 // CHECK-NEXT: entry:
11 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
12 // CHECK-NEXT: ret i32 [[TMP0]]
14 int32_t test_vmladavaq_s8(int32_t a, int8x16_t b, int8x16_t c) {
15 #ifdef POLYMORPHIC
16 return vmladavaq(a, b, c);
17 #else
18 return vmladavaq_s8(a, b, c);
19 #endif
22 // CHECK-LABEL: @test_vmladavaq_s16(
23 // CHECK-NEXT: entry:
24 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
25 // CHECK-NEXT: ret i32 [[TMP0]]
27 int32_t test_vmladavaq_s16(int32_t a, int16x8_t b, int16x8_t c) {
28 #ifdef POLYMORPHIC
29 return vmladavaq(a, b, c);
30 #else
31 return vmladavaq_s16(a, b, c);
32 #endif
35 // CHECK-LABEL: @test_vmladavaq_s32(
36 // CHECK-NEXT: entry:
37 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
38 // CHECK-NEXT: ret i32 [[TMP0]]
40 int32_t test_vmladavaq_s32(int32_t a, int32x4_t b, int32x4_t c) {
41 #ifdef POLYMORPHIC
42 return vmladavaq(a, b, c);
43 #else
44 return vmladavaq_s32(a, b, c);
45 #endif
48 // CHECK-LABEL: @test_vmladavaq_u8(
49 // CHECK-NEXT: entry:
50 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
51 // CHECK-NEXT: ret i32 [[TMP0]]
53 uint32_t test_vmladavaq_u8(uint32_t a, uint8x16_t b, uint8x16_t c) {
54 #ifdef POLYMORPHIC
55 return vmladavaq(a, b, c);
56 #else
57 return vmladavaq_u8(a, b, c);
58 #endif
61 // CHECK-LABEL: @test_vmladavaq_u16(
62 // CHECK-NEXT: entry:
63 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
64 // CHECK-NEXT: ret i32 [[TMP0]]
66 uint32_t test_vmladavaq_u16(uint32_t a, uint16x8_t b, uint16x8_t c) {
67 #ifdef POLYMORPHIC
68 return vmladavaq(a, b, c);
69 #else
70 return vmladavaq_u16(a, b, c);
71 #endif
74 // CHECK-LABEL: @test_vmladavaq_u32(
75 // CHECK-NEXT: entry:
76 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
77 // CHECK-NEXT: ret i32 [[TMP0]]
79 uint32_t test_vmladavaq_u32(uint32_t a, uint32x4_t b, uint32x4_t c) {
80 #ifdef POLYMORPHIC
81 return vmladavaq(a, b, c);
82 #else
83 return vmladavaq_u32(a, b, c);
84 #endif
87 // CHECK-LABEL: @test_vmladavaxq_s8(
88 // CHECK-NEXT: entry:
89 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
90 // CHECK-NEXT: ret i32 [[TMP0]]
92 int32_t test_vmladavaxq_s8(int32_t a, int8x16_t b, int8x16_t c) {
93 #ifdef POLYMORPHIC
94 return vmladavaxq(a, b, c);
95 #else
96 return vmladavaxq_s8(a, b, c);
97 #endif
100 // CHECK-LABEL: @test_vmladavaxq_s16(
101 // CHECK-NEXT: entry:
102 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
103 // CHECK-NEXT: ret i32 [[TMP0]]
105 int32_t test_vmladavaxq_s16(int32_t a, int16x8_t b, int16x8_t c) {
106 #ifdef POLYMORPHIC
107 return vmladavaxq(a, b, c);
108 #else
109 return vmladavaxq_s16(a, b, c);
110 #endif
113 // CHECK-LABEL: @test_vmladavaxq_s32(
114 // CHECK-NEXT: entry:
115 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
116 // CHECK-NEXT: ret i32 [[TMP0]]
118 int32_t test_vmladavaxq_s32(int32_t a, int32x4_t b, int32x4_t c) {
119 #ifdef POLYMORPHIC
120 return vmladavaxq(a, b, c);
121 #else
122 return vmladavaxq_s32(a, b, c);
123 #endif
126 // CHECK-LABEL: @test_vmlsdavaq_s8(
127 // CHECK-NEXT: entry:
128 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
129 // CHECK-NEXT: ret i32 [[TMP0]]
131 int32_t test_vmlsdavaq_s8(int32_t a, int8x16_t b, int8x16_t c) {
132 #ifdef POLYMORPHIC
133 return vmlsdavaq(a, b, c);
134 #else
135 return vmlsdavaq_s8(a, b, c);
136 #endif
139 // CHECK-LABEL: @test_vmlsdavaq_s16(
140 // CHECK-NEXT: entry:
141 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
142 // CHECK-NEXT: ret i32 [[TMP0]]
144 int32_t test_vmlsdavaq_s16(int32_t a, int16x8_t b, int16x8_t c) {
145 #ifdef POLYMORPHIC
146 return vmlsdavaq(a, b, c);
147 #else
148 return vmlsdavaq_s16(a, b, c);
149 #endif
152 // CHECK-LABEL: @test_vmlsdavaq_s32(
153 // CHECK-NEXT: entry:
154 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
155 // CHECK-NEXT: ret i32 [[TMP0]]
157 int32_t test_vmlsdavaq_s32(int32_t a, int32x4_t b, int32x4_t c) {
158 #ifdef POLYMORPHIC
159 return vmlsdavaq(a, b, c);
160 #else
161 return vmlsdavaq_s32(a, b, c);
162 #endif
165 // CHECK-LABEL: @test_vmlsdavaxq_s8(
166 // CHECK-NEXT: entry:
167 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]])
168 // CHECK-NEXT: ret i32 [[TMP0]]
170 int32_t test_vmlsdavaxq_s8(int32_t a, int8x16_t b, int8x16_t c) {
171 #ifdef POLYMORPHIC
172 return vmlsdavaxq(a, b, c);
173 #else
174 return vmlsdavaxq_s8(a, b, c);
175 #endif
178 // CHECK-LABEL: @test_vmlsdavaxq_s16(
179 // CHECK-NEXT: entry:
180 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]])
181 // CHECK-NEXT: ret i32 [[TMP0]]
183 int32_t test_vmlsdavaxq_s16(int32_t a, int16x8_t b, int16x8_t c) {
184 #ifdef POLYMORPHIC
185 return vmlsdavaxq(a, b, c);
186 #else
187 return vmlsdavaxq_s16(a, b, c);
188 #endif
191 // CHECK-LABEL: @test_vmlsdavaxq_s32(
192 // CHECK-NEXT: entry:
193 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]])
194 // CHECK-NEXT: ret i32 [[TMP0]]
196 int32_t test_vmlsdavaxq_s32(int32_t a, int32x4_t b, int32x4_t c) {
197 #ifdef POLYMORPHIC
198 return vmlsdavaxq(a, b, c);
199 #else
200 return vmlsdavaxq_s32(a, b, c);
201 #endif
204 // CHECK-LABEL: @test_vmladavaq_p_s8(
205 // CHECK-NEXT: entry:
206 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
207 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
208 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
209 // CHECK-NEXT: ret i32 [[TMP2]]
211 int32_t test_vmladavaq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
212 #ifdef POLYMORPHIC
213 return vmladavaq_p(a, b, c, p);
214 #else
215 return vmladavaq_p_s8(a, b, c, p);
216 #endif
219 // CHECK-LABEL: @test_vmladavaq_p_s16(
220 // CHECK-NEXT: entry:
221 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
222 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
223 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
224 // CHECK-NEXT: ret i32 [[TMP2]]
226 int32_t test_vmladavaq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
227 #ifdef POLYMORPHIC
228 return vmladavaq_p(a, b, c, p);
229 #else
230 return vmladavaq_p_s16(a, b, c, p);
231 #endif
234 // CHECK-LABEL: @test_vmladavaq_p_s32(
235 // CHECK-NEXT: entry:
236 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
237 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
238 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
239 // CHECK-NEXT: ret i32 [[TMP2]]
241 int32_t test_vmladavaq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
242 #ifdef POLYMORPHIC
243 return vmladavaq_p(a, b, c, p);
244 #else
245 return vmladavaq_p_s32(a, b, c, p);
246 #endif
249 // CHECK-LABEL: @test_vmladavaq_p_u8(
250 // CHECK-NEXT: entry:
251 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
252 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
253 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
254 // CHECK-NEXT: ret i32 [[TMP2]]
256 uint32_t test_vmladavaq_p_u8(uint32_t a, uint8x16_t b, uint8x16_t c, mve_pred16_t p) {
257 #ifdef POLYMORPHIC
258 return vmladavaq_p(a, b, c, p);
259 #else
260 return vmladavaq_p_u8(a, b, c, p);
261 #endif
264 // CHECK-LABEL: @test_vmladavaq_p_u16(
265 // CHECK-NEXT: entry:
266 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
267 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
268 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
269 // CHECK-NEXT: ret i32 [[TMP2]]
271 uint32_t test_vmladavaq_p_u16(uint32_t a, uint16x8_t b, uint16x8_t c, mve_pred16_t p) {
272 #ifdef POLYMORPHIC
273 return vmladavaq_p(a, b, c, p);
274 #else
275 return vmladavaq_p_u16(a, b, c, p);
276 #endif
279 // CHECK-LABEL: @test_vmladavaq_p_u32(
280 // CHECK-NEXT: entry:
281 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
282 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
283 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
284 // CHECK-NEXT: ret i32 [[TMP2]]
286 uint32_t test_vmladavaq_p_u32(uint32_t a, uint32x4_t b, uint32x4_t c, mve_pred16_t p) {
287 #ifdef POLYMORPHIC
288 return vmladavaq_p(a, b, c, p);
289 #else
290 return vmladavaq_p_u32(a, b, c, p);
291 #endif
294 // CHECK-LABEL: @test_vmladavaxq_p_s8(
295 // CHECK-NEXT: entry:
296 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
297 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
298 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
299 // CHECK-NEXT: ret i32 [[TMP2]]
301 int32_t test_vmladavaxq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
302 #ifdef POLYMORPHIC
303 return vmladavaxq_p(a, b, c, p);
304 #else
305 return vmladavaxq_p_s8(a, b, c, p);
306 #endif
309 // CHECK-LABEL: @test_vmladavaxq_p_s16(
310 // CHECK-NEXT: entry:
311 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
312 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
313 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
314 // CHECK-NEXT: ret i32 [[TMP2]]
316 int32_t test_vmladavaxq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
317 #ifdef POLYMORPHIC
318 return vmladavaxq_p(a, b, c, p);
319 #else
320 return vmladavaxq_p_s16(a, b, c, p);
321 #endif
324 // CHECK-LABEL: @test_vmladavaxq_p_s32(
325 // CHECK-NEXT: entry:
326 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
327 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
328 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
329 // CHECK-NEXT: ret i32 [[TMP2]]
331 int32_t test_vmladavaxq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
332 #ifdef POLYMORPHIC
333 return vmladavaxq_p(a, b, c, p);
334 #else
335 return vmladavaxq_p_s32(a, b, c, p);
336 #endif
339 // CHECK-LABEL: @test_vmlsdavaq_p_s8(
340 // CHECK-NEXT: entry:
341 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
342 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
343 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
344 // CHECK-NEXT: ret i32 [[TMP2]]
346 int32_t test_vmlsdavaq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
347 #ifdef POLYMORPHIC
348 return vmlsdavaq_p(a, b, c, p);
349 #else
350 return vmlsdavaq_p_s8(a, b, c, p);
351 #endif
354 // CHECK-LABEL: @test_vmlsdavaq_p_s16(
355 // CHECK-NEXT: entry:
356 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
357 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
358 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
359 // CHECK-NEXT: ret i32 [[TMP2]]
361 int32_t test_vmlsdavaq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
362 #ifdef POLYMORPHIC
363 return vmlsdavaq_p(a, b, c, p);
364 #else
365 return vmlsdavaq_p_s16(a, b, c, p);
366 #endif
369 // CHECK-LABEL: @test_vmlsdavaq_p_s32(
370 // CHECK-NEXT: entry:
371 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
372 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
373 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
374 // CHECK-NEXT: ret i32 [[TMP2]]
376 int32_t test_vmlsdavaq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
377 #ifdef POLYMORPHIC
378 return vmlsdavaq_p(a, b, c, p);
379 #else
380 return vmlsdavaq_p_s32(a, b, c, p);
381 #endif
384 // CHECK-LABEL: @test_vmlsdavaxq_p_s8(
385 // CHECK-NEXT: entry:
386 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
387 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
388 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]], <16 x i1> [[TMP1]])
389 // CHECK-NEXT: ret i32 [[TMP2]]
391 int32_t test_vmlsdavaxq_p_s8(int32_t a, int8x16_t b, int8x16_t c, mve_pred16_t p) {
392 #ifdef POLYMORPHIC
393 return vmlsdavaxq_p(a, b, c, p);
394 #else
395 return vmlsdavaxq_p_s8(a, b, c, p);
396 #endif
399 // CHECK-LABEL: @test_vmlsdavaxq_p_s16(
400 // CHECK-NEXT: entry:
401 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
402 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
403 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i16> [[C:%.*]], <8 x i1> [[TMP1]])
404 // CHECK-NEXT: ret i32 [[TMP2]]
406 int32_t test_vmlsdavaxq_p_s16(int32_t a, int16x8_t b, int16x8_t c, mve_pred16_t p) {
407 #ifdef POLYMORPHIC
408 return vmlsdavaxq_p(a, b, c, p);
409 #else
410 return vmlsdavaxq_p_s16(a, b, c, p);
411 #endif
414 // CHECK-LABEL: @test_vmlsdavaxq_p_s32(
415 // CHECK-NEXT: entry:
416 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
417 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
418 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i32> [[C:%.*]], <4 x i1> [[TMP1]])
419 // CHECK-NEXT: ret i32 [[TMP2]]
421 int32_t test_vmlsdavaxq_p_s32(int32_t a, int32x4_t b, int32x4_t c, mve_pred16_t p) {
422 #ifdef POLYMORPHIC
423 return vmlsdavaxq_p(a, b, c, p);
424 #else
425 return vmlsdavaxq_p_s32(a, b, c, p);
426 #endif
429 // CHECK-LABEL: @test_vmladavq_s8(
430 // CHECK-NEXT: entry:
431 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
432 // CHECK-NEXT: ret i32 [[TMP0]]
434 int32_t test_vmladavq_s8(int8x16_t a, int8x16_t b) {
435 #ifdef POLYMORPHIC
436 return vmladavq(a, b);
437 #else
438 return vmladavq_s8(a, b);
439 #endif
442 // CHECK-LABEL: @test_vmladavq_s16(
443 // CHECK-NEXT: entry:
444 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
445 // CHECK-NEXT: ret i32 [[TMP0]]
447 int32_t test_vmladavq_s16(int16x8_t a, int16x8_t b) {
448 #ifdef POLYMORPHIC
449 return vmladavq(a, b);
450 #else
451 return vmladavq_s16(a, b);
452 #endif
455 // CHECK-LABEL: @test_vmladavq_s32(
456 // CHECK-NEXT: entry:
457 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
458 // CHECK-NEXT: ret i32 [[TMP0]]
460 int32_t test_vmladavq_s32(int32x4_t a, int32x4_t b) {
461 #ifdef POLYMORPHIC
462 return vmladavq(a, b);
463 #else
464 return vmladavq_s32(a, b);
465 #endif
468 // CHECK-LABEL: @test_vmladavq_u8(
469 // CHECK-NEXT: entry:
470 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 1, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
471 // CHECK-NEXT: ret i32 [[TMP0]]
473 uint32_t test_vmladavq_u8(uint8x16_t a, uint8x16_t b) {
474 #ifdef POLYMORPHIC
475 return vmladavq(a, b);
476 #else
477 return vmladavq_u8(a, b);
478 #endif
481 // CHECK-LABEL: @test_vmladavq_u16(
482 // CHECK-NEXT: entry:
483 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
484 // CHECK-NEXT: ret i32 [[TMP0]]
486 uint32_t test_vmladavq_u16(uint16x8_t a, uint16x8_t b) {
487 #ifdef POLYMORPHIC
488 return vmladavq(a, b);
489 #else
490 return vmladavq_u16(a, b);
491 #endif
494 // CHECK-LABEL: @test_vmladavq_u32(
495 // CHECK-NEXT: entry:
496 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
497 // CHECK-NEXT: ret i32 [[TMP0]]
499 uint32_t test_vmladavq_u32(uint32x4_t a, uint32x4_t b) {
500 #ifdef POLYMORPHIC
501 return vmladavq(a, b);
502 #else
503 return vmladavq_u32(a, b);
504 #endif
507 // CHECK-LABEL: @test_vmladavxq_s8(
508 // CHECK-NEXT: entry:
509 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 0, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
510 // CHECK-NEXT: ret i32 [[TMP0]]
512 int32_t test_vmladavxq_s8(int8x16_t a, int8x16_t b) {
513 #ifdef POLYMORPHIC
514 return vmladavxq(a, b);
515 #else
516 return vmladavxq_s8(a, b);
517 #endif
520 // CHECK-LABEL: @test_vmladavxq_s16(
521 // CHECK-NEXT: entry:
522 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 0, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
523 // CHECK-NEXT: ret i32 [[TMP0]]
525 int32_t test_vmladavxq_s16(int16x8_t a, int16x8_t b) {
526 #ifdef POLYMORPHIC
527 return vmladavxq(a, b);
528 #else
529 return vmladavxq_s16(a, b);
530 #endif
533 // CHECK-LABEL: @test_vmladavxq_s32(
534 // CHECK-NEXT: entry:
535 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 0, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
536 // CHECK-NEXT: ret i32 [[TMP0]]
538 int32_t test_vmladavxq_s32(int32x4_t a, int32x4_t b) {
539 #ifdef POLYMORPHIC
540 return vmladavxq(a, b);
541 #else
542 return vmladavxq_s32(a, b);
543 #endif
546 // CHECK-LABEL: @test_vmlsdavq_s8(
547 // CHECK-NEXT: entry:
548 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
549 // CHECK-NEXT: ret i32 [[TMP0]]
551 int32_t test_vmlsdavq_s8(int8x16_t a, int8x16_t b) {
552 #ifdef POLYMORPHIC
553 return vmlsdavq(a, b);
554 #else
555 return vmlsdavq_s8(a, b);
556 #endif
559 // CHECK-LABEL: @test_vmlsdavq_s16(
560 // CHECK-NEXT: entry:
561 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
562 // CHECK-NEXT: ret i32 [[TMP0]]
564 int32_t test_vmlsdavq_s16(int16x8_t a, int16x8_t b) {
565 #ifdef POLYMORPHIC
566 return vmlsdavq(a, b);
567 #else
568 return vmlsdavq_s16(a, b);
569 #endif
572 // CHECK-LABEL: @test_vmlsdavq_s32(
573 // CHECK-NEXT: entry:
574 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
575 // CHECK-NEXT: ret i32 [[TMP0]]
577 int32_t test_vmlsdavq_s32(int32x4_t a, int32x4_t b) {
578 #ifdef POLYMORPHIC
579 return vmlsdavq(a, b);
580 #else
581 return vmlsdavq_s32(a, b);
582 #endif
585 // CHECK-LABEL: @test_vmlsdavxq_s8(
586 // CHECK-NEXT: entry:
587 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v16i8(i32 0, i32 1, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]])
588 // CHECK-NEXT: ret i32 [[TMP0]]
590 int32_t test_vmlsdavxq_s8(int8x16_t a, int8x16_t b) {
591 #ifdef POLYMORPHIC
592 return vmlsdavxq(a, b);
593 #else
594 return vmlsdavxq_s8(a, b);
595 #endif
598 // CHECK-LABEL: @test_vmlsdavxq_s16(
599 // CHECK-NEXT: entry:
600 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v8i16(i32 0, i32 1, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]])
601 // CHECK-NEXT: ret i32 [[TMP0]]
603 int32_t test_vmlsdavxq_s16(int16x8_t a, int16x8_t b) {
604 #ifdef POLYMORPHIC
605 return vmlsdavxq(a, b);
606 #else
607 return vmlsdavxq_s16(a, b);
608 #endif
611 // CHECK-LABEL: @test_vmlsdavxq_s32(
612 // CHECK-NEXT: entry:
613 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.arm.mve.vmldava.v4i32(i32 0, i32 1, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]])
614 // CHECK-NEXT: ret i32 [[TMP0]]
616 int32_t test_vmlsdavxq_s32(int32x4_t a, int32x4_t b) {
617 #ifdef POLYMORPHIC
618 return vmlsdavxq(a, b);
619 #else
620 return vmlsdavxq_s32(a, b);
621 #endif
624 // CHECK-LABEL: @test_vmladavq_p_s8(
625 // CHECK-NEXT: entry:
626 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
627 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
628 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
629 // CHECK-NEXT: ret i32 [[TMP2]]
631 int32_t test_vmladavq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
632 #ifdef POLYMORPHIC
633 return vmladavq_p(a, b, p);
634 #else
635 return vmladavq_p_s8(a, b, p);
636 #endif
639 // CHECK-LABEL: @test_vmladavq_p_s16(
640 // CHECK-NEXT: entry:
641 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
642 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
643 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
644 // CHECK-NEXT: ret i32 [[TMP2]]
646 int32_t test_vmladavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
647 #ifdef POLYMORPHIC
648 return vmladavq_p(a, b, p);
649 #else
650 return vmladavq_p_s16(a, b, p);
651 #endif
654 // CHECK-LABEL: @test_vmladavq_p_s32(
655 // CHECK-NEXT: entry:
656 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
657 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
658 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
659 // CHECK-NEXT: ret i32 [[TMP2]]
661 int32_t test_vmladavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
662 #ifdef POLYMORPHIC
663 return vmladavq_p(a, b, p);
664 #else
665 return vmladavq_p_s32(a, b, p);
666 #endif
669 // CHECK-LABEL: @test_vmladavq_p_u8(
670 // CHECK-NEXT: entry:
671 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
672 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
673 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 1, i32 0, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
674 // CHECK-NEXT: ret i32 [[TMP2]]
676 uint32_t test_vmladavq_p_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p) {
677 #ifdef POLYMORPHIC
678 return vmladavq_p(a, b, p);
679 #else
680 return vmladavq_p_u8(a, b, p);
681 #endif
684 // CHECK-LABEL: @test_vmladavq_p_u16(
685 // CHECK-NEXT: entry:
686 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
687 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
688 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 1, i32 0, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
689 // CHECK-NEXT: ret i32 [[TMP2]]
691 uint32_t test_vmladavq_p_u16(uint16x8_t a, uint16x8_t b, mve_pred16_t p) {
692 #ifdef POLYMORPHIC
693 return vmladavq_p(a, b, p);
694 #else
695 return vmladavq_p_u16(a, b, p);
696 #endif
699 // CHECK-LABEL: @test_vmladavq_p_u32(
700 // CHECK-NEXT: entry:
701 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
702 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
703 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 1, i32 0, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
704 // CHECK-NEXT: ret i32 [[TMP2]]
706 uint32_t test_vmladavq_p_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p) {
707 #ifdef POLYMORPHIC
708 return vmladavq_p(a, b, p);
709 #else
710 return vmladavq_p_u32(a, b, p);
711 #endif
714 // CHECK-LABEL: @test_vmladavxq_p_s8(
715 // CHECK-NEXT: entry:
716 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
717 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
718 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 0, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
719 // CHECK-NEXT: ret i32 [[TMP2]]
721 int32_t test_vmladavxq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
722 #ifdef POLYMORPHIC
723 return vmladavxq_p(a, b, p);
724 #else
725 return vmladavxq_p_s8(a, b, p);
726 #endif
729 // CHECK-LABEL: @test_vmladavxq_p_s16(
730 // CHECK-NEXT: entry:
731 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
732 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
733 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 0, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
734 // CHECK-NEXT: ret i32 [[TMP2]]
736 int32_t test_vmladavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
737 #ifdef POLYMORPHIC
738 return vmladavxq_p(a, b, p);
739 #else
740 return vmladavxq_p_s16(a, b, p);
741 #endif
744 // CHECK-LABEL: @test_vmladavxq_p_s32(
745 // CHECK-NEXT: entry:
746 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
747 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
748 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 0, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
749 // CHECK-NEXT: ret i32 [[TMP2]]
751 int32_t test_vmladavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
752 #ifdef POLYMORPHIC
753 return vmladavxq_p(a, b, p);
754 #else
755 return vmladavxq_p_s32(a, b, p);
756 #endif
759 // CHECK-LABEL: @test_vmlsdavq_p_s8(
760 // CHECK-NEXT: entry:
761 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
762 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
763 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 0, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
764 // CHECK-NEXT: ret i32 [[TMP2]]
766 int32_t test_vmlsdavq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
767 #ifdef POLYMORPHIC
768 return vmlsdavq_p(a, b, p);
769 #else
770 return vmlsdavq_p_s8(a, b, p);
771 #endif
774 // CHECK-LABEL: @test_vmlsdavq_p_s16(
775 // CHECK-NEXT: entry:
776 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
777 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
778 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 0, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
779 // CHECK-NEXT: ret i32 [[TMP2]]
781 int32_t test_vmlsdavq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
782 #ifdef POLYMORPHIC
783 return vmlsdavq_p(a, b, p);
784 #else
785 return vmlsdavq_p_s16(a, b, p);
786 #endif
789 // CHECK-LABEL: @test_vmlsdavq_p_s32(
790 // CHECK-NEXT: entry:
791 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
792 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
793 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 0, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
794 // CHECK-NEXT: ret i32 [[TMP2]]
796 int32_t test_vmlsdavq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
797 #ifdef POLYMORPHIC
798 return vmlsdavq_p(a, b, p);
799 #else
800 return vmlsdavq_p_s32(a, b, p);
801 #endif
804 // CHECK-LABEL: @test_vmlsdavxq_p_s8(
805 // CHECK-NEXT: entry:
806 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
807 // CHECK-NEXT: [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
808 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v16i8.v16i1(i32 0, i32 1, i32 1, i32 0, <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]])
809 // CHECK-NEXT: ret i32 [[TMP2]]
811 int32_t test_vmlsdavxq_p_s8(int8x16_t a, int8x16_t b, mve_pred16_t p) {
812 #ifdef POLYMORPHIC
813 return vmlsdavxq_p(a, b, p);
814 #else
815 return vmlsdavxq_p_s8(a, b, p);
816 #endif
819 // CHECK-LABEL: @test_vmlsdavxq_p_s16(
820 // CHECK-NEXT: entry:
821 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
822 // CHECK-NEXT: [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
823 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v8i16.v8i1(i32 0, i32 1, i32 1, i32 0, <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]])
824 // CHECK-NEXT: ret i32 [[TMP2]]
826 int32_t test_vmlsdavxq_p_s16(int16x8_t a, int16x8_t b, mve_pred16_t p) {
827 #ifdef POLYMORPHIC
828 return vmlsdavxq_p(a, b, p);
829 #else
830 return vmlsdavxq_p_s16(a, b, p);
831 #endif
834 // CHECK-LABEL: @test_vmlsdavxq_p_s32(
835 // CHECK-NEXT: entry:
836 // CHECK-NEXT: [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
837 // CHECK-NEXT: [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
838 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.arm.mve.vmldava.predicated.v4i32.v4i1(i32 0, i32 1, i32 1, i32 0, <4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]])
839 // CHECK-NEXT: ret i32 [[TMP2]]
841 int32_t test_vmlsdavxq_p_s32(int32x4_t a, int32x4_t b, mve_pred16_t p) {
842 #ifdef POLYMORPHIC
843 return vmlsdavxq_p(a, b, p);
844 #else
845 return vmlsdavxq_p_s32(a, b, p);
846 #endif