; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
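
; The llvm.riscv.vmadc intrinsic produces the carry-out mask of an unmasked
; integer add. Each call below should select a single vmadc.{vv,vx,vi} that
; writes its mask result to v0, across all SEW/LMUL combinations.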

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}
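
; From LMUL=2 upward the operands occupy register groups, which must be
; LMUL-aligned, so the second source moves from v9 to v10/v12/v16 below.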

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2)
  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}
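
; vmadc.vx: the scalar addend is passed in a0, so vl moves to a1.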

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)
  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)
  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)
  ret <vscale x 16 x i1> %a
}
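
; On RV32 an i64 scalar spans two GPRs, so it is spilled to the stack,
; splatted with a zero-stride vlse64.v, and the carry is computed with
; vmadc.vv instead of vmadc.vx.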

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmadc.vv v0, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmadc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)
  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmadc.vv v0, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmadc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)
  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmadc.vv v0, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmadc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)
  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmadc.vv v0, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vmadc.vx v0, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)
  ret <vscale x 8 x i1> %a
}
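
; vmadc.vi: immediates in the simm5 range (here 9 and -9) fold directly
; into the instruction, so no scalar register is needed.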

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -9,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -9,
    iXLen %1)
  ret <vscale x 32 x i1> %a
}

define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 9,
    iXLen %1)
  ret <vscale x 64 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -9,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 -9,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 -9,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)
  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 -9,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 -9,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 -9,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 -9,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 -9,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}