; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
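
; Check that a vp.mul + vp.add chain feeding vp.merge or vp.select folds into
; a single vmadd/vmacc (vmadd.vx for a splat operand), and that the vsetvli
; policy follows the use: tail undisturbed for vp.merge, tail agnostic for
; vp.select, with the mask applied via v0.t or a trailing vmerge.vvm.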

declare <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
declare <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i8> @vmadd_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

define <vscale x 1 x i8> @vmadd_vv_nxv1i8_unmasked(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

define <vscale x 1 x i8> @vmadd_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

define <vscale x 1 x i8> @vmadd_vx_nxv1i8_unmasked(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

define <vscale x 1 x i8> @vmadd_vv_nxv1i8_ta(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

define <vscale x 1 x i8> @vmadd_vx_nxv1i8_ta(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %a, i32 %evl)
  ret <vscale x 1 x i8> %u
}

declare <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
declare <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)

define <vscale x 2 x i8> @vmadd_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

define <vscale x 2 x i8> @vmadd_vv_nxv2i8_unmasked(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

define <vscale x 2 x i8> @vmadd_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

define <vscale x 2 x i8> @vmadd_vx_nxv2i8_unmasked(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

define <vscale x 2 x i8> @vmadd_vv_nxv2i8_ta(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

define <vscale x 2 x i8> @vmadd_vx_nxv2i8_ta(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %a, i32 %evl)
  ret <vscale x 2 x i8> %u
}

declare <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
declare <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)

define <vscale x 4 x i8> @vmadd_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

define <vscale x 4 x i8> @vmadd_vv_nxv4i8_unmasked(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

define <vscale x 4 x i8> @vmadd_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

define <vscale x 4 x i8> @vmadd_vx_nxv4i8_unmasked(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

define <vscale x 4 x i8> @vmadd_vv_nxv4i8_ta(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

define <vscale x 4 x i8> @vmadd_vx_nxv4i8_ta(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %a, i32 %evl)
  ret <vscale x 4 x i8> %u
}

declare <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
declare <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)

define <vscale x 8 x i8> @vmadd_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

define <vscale x 8 x i8> @vmadd_vv_nxv8i8_unmasked(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

define <vscale x 8 x i8> @vmadd_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

define <vscale x 8 x i8> @vmadd_vx_nxv8i8_unmasked(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

define <vscale x 8 x i8> @vmadd_vv_nxv8i8_ta(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

define <vscale x 8 x i8> @vmadd_vx_nxv8i8_ta(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %a, i32 %evl)
  ret <vscale x 8 x i8> %u
}

declare <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)

define <vscale x 16 x i8> @vmadd_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmadd.vv v10, v8, v12
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

define <vscale x 16 x i8> @vmadd_vv_nxv16i8_unmasked(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmadd.vv v10, v8, v12
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

define <vscale x 16 x i8> @vmadd_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

define <vscale x 16 x i8> @vmadd_vx_nxv16i8_unmasked(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v10
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

define <vscale x 16 x i8> @vmadd_vv_nxv16i8_ta(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmadd.vv v10, v8, v12
; CHECK-NEXT:    vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

define <vscale x 16 x i8> @vmadd_vx_nxv16i8_ta(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %a, i32 %evl)
  ret <vscale x 16 x i8> %u
}

declare <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32)
declare <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32)

define <vscale x 32 x i8> @vmadd_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmadd.vv v12, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

define <vscale x 32 x i8> @vmadd_vv_nxv32i8_unmasked(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmadd.vv v12, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

define <vscale x 32 x i8> @vmadd_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

define <vscale x 32 x i8> @vmadd_vx_nxv32i8_unmasked(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v12
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

define <vscale x 32 x i8> @vmadd_vv_nxv32i8_ta(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmadd.vv v12, v8, v16
; CHECK-NEXT:    vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

define <vscale x 32 x i8> @vmadd_vx_nxv32i8_ta(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %a, i32 %evl)
  ret <vscale x 32 x i8> %u
}

declare <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
declare <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
declare <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32)
declare <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32)
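
; With all three operands at LMUL=8, %c no longer fits in the vector argument
; registers (v8-v23), so it is passed indirectly and reloaded with vl8r.v; the
; multiply-add is then matched as vmacc.vv into the reloaded register.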

define <vscale x 64 x i8> @vmadd_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmacc.vv v24, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e8, m8, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

define <vscale x 64 x i8> @vmadd_vv_nxv64i8_unmasked(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmacc.vv v24, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e8, m8, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

define <vscale x 64 x i8> @vmadd_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

define <vscale x 64 x i8> @vmadd_vx_nxv64i8_unmasked(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v16
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> splat (i1 -1), <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

define <vscale x 64 x i8> @vmadd_vv_nxv64i8_ta(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv64i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmacc.vv v24, v8, v16
; CHECK-NEXT:    vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

define <vscale x 64 x i8> @vmadd_vx_nxv64i8_ta(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv64i8_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %c, <vscale x 64 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %a, i32 %evl)
  ret <vscale x 64 x i8> %u
}

declare <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32)
declare <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32)

define <vscale x 1 x i16> @vmadd_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

define <vscale x 1 x i16> @vmadd_vv_nxv1i16_unmasked(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

define <vscale x 1 x i16> @vmadd_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

define <vscale x 1 x i16> @vmadd_vx_nxv1i16_unmasked(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

define <vscale x 1 x i16> @vmadd_vv_nxv1i16_ta(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

define <vscale x 1 x i16> @vmadd_vx_nxv1i16_ta(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %a, i32 %evl)
  ret <vscale x 1 x i16> %u
}

declare <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)

define <vscale x 2 x i16> @vmadd_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

define <vscale x 2 x i16> @vmadd_vv_nxv2i16_unmasked(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

define <vscale x 2 x i16> @vmadd_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

define <vscale x 2 x i16> @vmadd_vx_nxv2i16_unmasked(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

define <vscale x 2 x i16> @vmadd_vv_nxv2i16_ta(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

define <vscale x 2 x i16> @vmadd_vx_nxv2i16_ta(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %a, i32 %evl)
  ret <vscale x 2 x i16> %u
}

declare <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32)
declare <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32)

define <vscale x 4 x i16> @vmadd_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

define <vscale x 4 x i16> @vmadd_vv_nxv4i16_unmasked(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

define <vscale x 4 x i16> @vmadd_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

define <vscale x 4 x i16> @vmadd_vx_nxv4i16_unmasked(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

define <vscale x 4 x i16> @vmadd_vv_nxv4i16_ta(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmadd.vv v9, v8, v10
; CHECK-NEXT:    vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

define <vscale x 4 x i16> @vmadd_vx_nxv4i16_ta(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i16_ta:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %y = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
  %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %a, i32 %evl)
  ret <vscale x 4 x i16> %u
}

declare <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
declare <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

define <vscale x 8 x i16> @vmadd_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

define <vscale x 8 x i16> @vmadd_vv_nxv8i16_unmasked(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

define <vscale x 8 x i16> @vmadd_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

define <vscale x 8 x i16> @vmadd_vx_nxv8i16_unmasked(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

define <vscale x 8 x i16> @vmadd_vv_nxv8i16_ta(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

define <vscale x 8 x i16> @vmadd_vx_nxv8i16_ta(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %a, i32 %evl)
ret <vscale x 8 x i16> %u
}

declare <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32)
declare <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32)

define <vscale x 16 x i16> @vmadd_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

define <vscale x 16 x i16> @vmadd_vv_nxv16i16_unmasked(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m4, tu, ma
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

define <vscale x 16 x i16> @vmadd_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

define <vscale x 16 x i16> @vmadd_vx_nxv16i16_unmasked(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

define <vscale x 16 x i16> @vmadd_vv_nxv16i16_ta(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

define <vscale x 16 x i16> @vmadd_vx_nxv16i16_ta(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %a, i32 %evl)
ret <vscale x 16 x i16> %u
}

declare <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
declare <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32)
declare <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32)
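
; At LMUL=8 the vector operands %a and %b already occupy v8-v15 and v16-v23,
; so %c is passed indirectly and reloaded with vl8re16.v; the fused op is then
; selected as vmacc.vv (vd += vs1 * vs2) rather than vmadd.vv.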

define <vscale x 32 x i16> @vmadd_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

define <vscale x 32 x i16> @vmadd_vv_nxv32i16_unmasked(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m8, tu, ma
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

define <vscale x 32 x i16> @vmadd_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

define <vscale x 32 x i16> @vmadd_vx_nxv32i16_unmasked(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> splat (i1 -1), <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

define <vscale x 32 x i16> @vmadd_vv_nxv32i16_ta(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv32i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

define <vscale x 32 x i16> @vmadd_vx_nxv32i16_ta(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv32i16_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
%vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
%x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %c, <vscale x 32 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %a, i32 %evl)
ret <vscale x 32 x i16> %u
}

declare <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32)
declare <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32)

define <vscale x 1 x i32> @vmadd_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

define <vscale x 1 x i32> @vmadd_vv_nxv1i32_unmasked(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

define <vscale x 1 x i32> @vmadd_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

define <vscale x 1 x i32> @vmadd_vx_nxv1i32_unmasked(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

define <vscale x 1 x i32> @vmadd_vv_nxv1i32_ta(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

define <vscale x 1 x i32> @vmadd_vx_nxv1i32_ta(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv1i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %a, i32 %evl)
ret <vscale x 1 x i32> %u
}

declare <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)

define <vscale x 2 x i32> @vmadd_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

define <vscale x 2 x i32> @vmadd_vv_nxv2i32_unmasked(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

define <vscale x 2 x i32> @vmadd_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

define <vscale x 2 x i32> @vmadd_vx_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

define <vscale x 2 x i32> @vmadd_vv_nxv2i32_ta(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

define <vscale x 2 x i32> @vmadd_vx_nxv2i32_ta(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv2i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %a, i32 %evl)
ret <vscale x 2 x i32> %u
}

declare <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)

define <vscale x 4 x i32> @vmadd_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

define <vscale x 4 x i32> @vmadd_vv_nxv4i32_unmasked(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

define <vscale x 4 x i32> @vmadd_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

define <vscale x 4 x i32> @vmadd_vx_nxv4i32_unmasked(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

define <vscale x 4 x i32> @vmadd_vv_nxv4i32_ta(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

define <vscale x 4 x i32> @vmadd_vx_nxv4i32_ta(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv4i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v10, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %a, i32 %evl)
ret <vscale x 4 x i32> %u
}

declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
declare <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)

define <vscale x 8 x i32> @vmadd_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vmadd_vv_nxv8i32_unmasked(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, ma
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vmadd_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vmadd_vx_nxv8i32_unmasked(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vmadd_vv_nxv8i32_ta(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vmadd_vx_nxv8i32_ta(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv8i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v12, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %a, i32 %evl)
ret <vscale x 8 x i32> %u
}

declare <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32)
declare <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32)

define <vscale x 16 x i32> @vmadd_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vmadd_vv_nxv16i32_unmasked(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, ma
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vmadd_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT: vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vmadd_vx_nxv16i32_unmasked(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT: vmadd.vx v8, a0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> splat (i1 -1), <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vmadd_vv_nxv16i32_ta(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv16i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vmadd_vx_nxv16i32_ta(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vx_nxv16i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vmadd.vx v8, a0, v16, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %a, i32 %evl)
ret <vscale x 16 x i32> %u
}

declare <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32)
declare <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32)

define <vscale x 1 x i64> @vmadd_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vmadd_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}
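
; On RV32 there is no 64-bit GPR, so the i64 scalar arrives as a register
; pair: it is stored to the stack and broadcast with a zero-stride vlse64.v,
; and the multiply-add is done as vmadd.vv on the splatted vector instead of
; vmadd.vx.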

define <vscale x 1 x i64> @vmadd_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmadd.vv v10, v8, v9
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vmadd.vx v8, a0, v9, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vmadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv1i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmadd.vv v10, v8, v9
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv1i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vmadd.vx v8, a0, v9
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> splat (i1 -1), <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vmadd_vv_nxv1i64_ta(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv1i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmadd.vv v9, v8, v10
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vmadd_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv1i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmadd.vv v10, v8, v9
; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv1i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmadd.vx v8, a0, v9, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %a, i32 %evl)
ret <vscale x 1 x i64> %u
}

declare <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

define <vscale x 2 x i64> @vmadd_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vmadd_vv_nxv2i64_unmasked(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vmadd_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmadd.vv v12, v8, v10
; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; RV32-NEXT: vmerge.vvm v8, v8, v12, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vmadd.vx v8, a0, v10, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}
define <vscale x 2 x i64> @vmadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv2i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmadd.vv v12, v8, v10
; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv2i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
; RV64-NEXT: vmadd.vx v8, a0, v10
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> splat (i1 -1), <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}

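; The _ta variants below wrap the result in vp.select rather than vp.merge,
; so the result is tail-agnostic and the extra tail-undisturbed vsetvli
; toggle disappears from the expected sequences.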
define <vscale x 2 x i64> @vmadd_vv_nxv2i64_ta(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv2i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmadd.vv v10, v8, v12
; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
; CHECK-NEXT: ret
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vmadd_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv2i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmadd.vv v12, v8, v10
; RV32-NEXT: vmerge.vvm v8, v8, v12, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv2i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmadd.vx v8, a0, v10, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %a, i32 %evl)
ret <vscale x 2 x i64> %u
}

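; LMUL=4 (nxv4i64) coverage: the same patterns with operands in four-register
; groups (v8, v12, v16).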
declare <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32)
declare <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32)

define <vscale x 4 x i64> @vmadd_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vmadd_vv_nxv4i64_unmasked(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vmadd_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmadd.vv v16, v8, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
; RV64-NEXT: vmadd.vx v8, a0, v12, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vmadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv4i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmadd.vv v16, v8, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv4i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
; RV64-NEXT: vmadd.vx v8, a0, v12
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> splat (i1 -1), <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vmadd_vv_nxv4i64_ta(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv4i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmadd.vv v12, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v12, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vmadd_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv4i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmadd.vv v16, v8, v12
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv4i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmadd.vx v8, a0, v12, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %a, i32 %evl)
ret <vscale x 4 x i64> %u
}

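; LMUL=8 (nxv8i64) coverage: each operand now occupies a full eight-register
; group.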
declare <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)
declare <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)

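; With three m8 vector operands there are not enough vector argument
; registers, so %c is passed indirectly and reloaded with vl8re64.v. The
; fused multiply-add then accumulates into that reload as vmacc.vv
; (vd += vs1 * vs2) rather than vmadd.vv (vd = vd * vs1 + vs2).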
define <vscale x 8 x i64> @vmadd_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vmadd_vv_nxv8i64_unmasked(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vmadd_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmadd.vv v24, v8, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT: vmerge.vvm v8, v8, v24, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; RV64-NEXT: vmadd.vx v8, a0, v16, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vmadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv8i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmadd.vv v24, v8, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT: vmv.v.v v8, v24
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv8i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; RV64-NEXT: vmadd.vx v8, a0, v16
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> splat (i1 -1), <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vmadd_vv_nxv8i64_ta(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmadd_vv_nxv8i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmacc.vv v24, v8, v16
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vmadd_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmadd_vx_nxv8i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmadd.vv v24, v8, v16
; RV32-NEXT: vmerge.vvm v8, v8, v24, v0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vmadd_vx_nxv8i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vmadd.vx v8, a0, v16, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %a, i32 %evl)
ret <vscale x 8 x i64> %u
}