; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvqmaccqoq \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvqmaccqoq \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
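; Each pair of tests below exercises the SiFive sf.vqmacc.4x8x4 matrix
; multiply-accumulate intrinsic at one result LMUL (i32m1 through i32m8),
; once tail-undisturbed (policy operand 2 -> "tu, ma" in the vsetvli) and
; once tail-agnostic (policy operand 3 -> "ta, ma").
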
declare <vscale x 2 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv2i32.nxv8i8.nxv4i8(
  <vscale x 2 x i32>,
  <vscale x 8 x i8>,
  <vscale x 4 x i8>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m1(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv2i32.nxv8i8.nxv4i8(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m1(<vscale x 2 x i32> %0, <vscale x 8 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv2i32.nxv8i8.nxv4i8(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 2 x i32> %a
}

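; i32m2 variants: vd is <vscale x 4 x i32> (LMUL=2); vs1 stays <vscale x 8 x i8>
; while vs2 is <vscale x 8 x i8> (the vsetvli below runs at e8, m1).
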
declare <vscale x 4 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv8i8(
  <vscale x 4 x i32>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv8i8(
    <vscale x 4 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv8i8(
    <vscale x 4 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 4 x i32> %a
}

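; i32m4 variants: vd is <vscale x 8 x i32> (LMUL=4); vs2 is <vscale x 16 x i8>
; (the vsetvli below runs at e8, m2).
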
declare <vscale x 8 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv16i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  <vscale x 16 x i8>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv16i8(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv16i8(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 8 x i32> %a
}

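; i32m8 variants: vd is <vscale x 16 x i32> (LMUL=8); vs2 is <vscale x 32 x i8>
; (the vsetvli below runs at e8, m4).
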
declare <vscale x 16 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv16i32.nxv8i8.nxv32i8(
  <vscale x 16 x i32>,
  <vscale x 8 x i8>,
  <vscale x 32 x i8>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m8(<vscale x 16 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv16i32.nxv8i8.nxv32i8(
    <vscale x 16 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 2)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m8(<vscale x 16 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv16i32.nxv8i8.nxv32i8(
    <vscale x 16 x i32> %0,
    <vscale x 8 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %3, iXLen 3)

  ret <vscale x 16 x i32> %a
}