; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

; This tests a mix of vnmsac and vnmsub by using different operand orders to
; trigger commuting in TwoAddressInstructionPass.
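;
; For reference (semantics per the RVV spec): vnmsub.vv vd, vs1, vs2 computes
; vd = -(vd * vs1) + vs2, overwriting the multiplicand, while vnmsac.vv
; vd, vs1, vs2 computes vd = -(vs1 * vs2) + vd, overwriting the value being
; subtracted from. Which form is selected below depends on whether v8 (the
; first vector argument) carries a multiply operand or the minuend of the sub.
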
define <vscale x 1 x i8> @vnmsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 1 x i8> %va, %vb
  %y = sub <vscale x 1 x i8> %vc, %x
  ret <vscale x 1 x i8> %y
}

define <vscale x 1 x i8> @vnmsub_vx_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %x = mul <vscale x 1 x i8> %va, %splat
  %y = sub <vscale x 1 x i8> %vb, %x
  ret <vscale x 1 x i8> %y
}

define <vscale x 2 x i8> @vnmsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v9
; CHECK-NEXT:    ret
  %x = mul <vscale x 2 x i8> %va, %vc
  %y = sub <vscale x 2 x i8> %vb, %x
  ret <vscale x 2 x i8> %y
}

define <vscale x 2 x i8> @vnmsub_vx_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %x = mul <vscale x 2 x i8> %vb, %splat
  %y = sub <vscale x 2 x i8> %va, %x
  ret <vscale x 2 x i8> %y
}

define <vscale x 4 x i8> @vnmsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 4 x i8> %vb, %va
  %y = sub <vscale x 4 x i8> %vc, %x
  ret <vscale x 4 x i8> %y
}

define <vscale x 4 x i8> @vnmsub_vx_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %x = mul <vscale x 4 x i8> %va, %splat
  %y = sub <vscale x 4 x i8> %vb, %x
  ret <vscale x 4 x i8> %y
}

define <vscale x 8 x i8> @vnmsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
  %x = mul <vscale x 8 x i8> %vb, %vc
  %y = sub <vscale x 8 x i8> %va, %x
  ret <vscale x 8 x i8> %y
}

define <vscale x 8 x i8> @vnmsub_vx_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %x = mul <vscale x 8 x i8> %vb, %splat
  %y = sub <vscale x 8 x i8> %va, %x
  ret <vscale x 8 x i8> %y
}

define <vscale x 16 x i8> @vnmsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v12, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 16 x i8> %vc, %va
  %y = sub <vscale x 16 x i8> %vb, %x
  ret <vscale x 16 x i8> %y
}

define <vscale x 16 x i8> @vnmsub_vx_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %x = mul <vscale x 16 x i8> %va, %splat
  %y = sub <vscale x 16 x i8> %vb, %x
  ret <vscale x 16 x i8> %y
}

define <vscale x 32 x i8> @vnmsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v16, v12
; CHECK-NEXT:    ret
  %x = mul <vscale x 32 x i8> %vc, %vb
  %y = sub <vscale x 32 x i8> %va, %x
  ret <vscale x 32 x i8> %y
}

define <vscale x 32 x i8> @vnmsub_vx_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %x = mul <vscale x 32 x i8> %vb, %splat
  %y = sub <vscale x 32 x i8> %va, %x
  ret <vscale x 32 x i8> %y
}

define <vscale x 64 x i8> @vnmsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i8> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v16, v24
; CHECK-NEXT:    ret
  %x = mul <vscale x 64 x i8> %vc, %vb
  %y = sub <vscale x 64 x i8> %va, %x
  ret <vscale x 64 x i8> %y
}

define <vscale x 64 x i8> @vnmsub_vx_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, i8 %c) {
; CHECK-LABEL: vnmsub_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %c, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %x = mul <vscale x 64 x i8> %vb, %splat
  %y = sub <vscale x 64 x i8> %va, %x
  ret <vscale x 64 x i8> %y
}

define <vscale x 1 x i16> @vnmsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 1 x i16> %va, %vb
  %y = sub <vscale x 1 x i16> %vc, %x
  ret <vscale x 1 x i16> %y
}

define <vscale x 1 x i16> @vnmsub_vx_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %x = mul <vscale x 1 x i16> %va, %splat
  %y = sub <vscale x 1 x i16> %vb, %x
  ret <vscale x 1 x i16> %y
}

define <vscale x 2 x i16> @vnmsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v9
; CHECK-NEXT:    ret
  %x = mul <vscale x 2 x i16> %va, %vc
  %y = sub <vscale x 2 x i16> %vb, %x
  ret <vscale x 2 x i16> %y
}

define <vscale x 2 x i16> @vnmsub_vx_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %x = mul <vscale x 2 x i16> %vb, %splat
  %y = sub <vscale x 2 x i16> %va, %x
  ret <vscale x 2 x i16> %y
}

define <vscale x 4 x i16> @vnmsub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 4 x i16> %vb, %va
  %y = sub <vscale x 4 x i16> %vc, %x
  ret <vscale x 4 x i16> %y
}

define <vscale x 4 x i16> @vnmsub_vx_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %x = mul <vscale x 4 x i16> %va, %splat
  %y = sub <vscale x 4 x i16> %vb, %x
  ret <vscale x 4 x i16> %y
}

define <vscale x 8 x i16> @vnmsub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v12, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 8 x i16> %vb, %vc
  %y = sub <vscale x 8 x i16> %va, %x
  ret <vscale x 8 x i16> %y
}

define <vscale x 8 x i16> @vnmsub_vx_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %x = mul <vscale x 8 x i16> %vb, %splat
  %y = sub <vscale x 8 x i16> %va, %x
  ret <vscale x 8 x i16> %y
}

define <vscale x 16 x i16> @vnmsub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v16, v12
; CHECK-NEXT:    ret
  %x = mul <vscale x 16 x i16> %vc, %va
  %y = sub <vscale x 16 x i16> %vb, %x
  ret <vscale x 16 x i16> %y
}

define <vscale x 16 x i16> @vnmsub_vx_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %x = mul <vscale x 16 x i16> %va, %splat
  %y = sub <vscale x 16 x i16> %vb, %x
  ret <vscale x 16 x i16> %y
}

define <vscale x 32 x i16> @vnmsub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i16> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v16, v24
; CHECK-NEXT:    ret
  %x = mul <vscale x 32 x i16> %vc, %vb
  %y = sub <vscale x 32 x i16> %va, %x
  ret <vscale x 32 x i16> %y
}

define <vscale x 32 x i16> @vnmsub_vx_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, i16 %c) {
; CHECK-LABEL: vnmsub_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %c, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %x = mul <vscale x 32 x i16> %vb, %splat
  %y = sub <vscale x 32 x i16> %va, %x
  ret <vscale x 32 x i16> %y
}

define <vscale x 1 x i32> @vnmsub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i32> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 1 x i32> %va, %vb
  %y = sub <vscale x 1 x i32> %vc, %x
  ret <vscale x 1 x i32> %y
}

define <vscale x 1 x i32> @vnmsub_vx_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, i32 %c) {
; CHECK-LABEL: vnmsub_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %c, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %x = mul <vscale x 1 x i32> %va, %splat
  %y = sub <vscale x 1 x i32> %vb, %x
  ret <vscale x 1 x i32> %y
}

define <vscale x 2 x i32> @vnmsub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i32> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v9
; CHECK-NEXT:    ret
  %x = mul <vscale x 2 x i32> %va, %vc
  %y = sub <vscale x 2 x i32> %vb, %x
  ret <vscale x 2 x i32> %y
}

define <vscale x 2 x i32> @vnmsub_vx_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 %c) {
; CHECK-LABEL: vnmsub_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %c, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %x = mul <vscale x 2 x i32> %vb, %splat
  %y = sub <vscale x 2 x i32> %va, %x
  ret <vscale x 2 x i32> %y
}

define <vscale x 4 x i32> @vnmsub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i32> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
  %x = mul <vscale x 4 x i32> %vb, %va
  %y = sub <vscale x 4 x i32> %vc, %x
  ret <vscale x 4 x i32> %y
}

define <vscale x 4 x i32> @vnmsub_vx_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, i32 %c) {
; CHECK-LABEL: vnmsub_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %c, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %x = mul <vscale x 4 x i32> %va, %splat
  %y = sub <vscale x 4 x i32> %vb, %x
  ret <vscale x 4 x i32> %y
}

define <vscale x 8 x i32> @vnmsub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i32> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v16, v12
; CHECK-NEXT:    ret
  %x = mul <vscale x 8 x i32> %vb, %vc
  %y = sub <vscale x 8 x i32> %va, %x
  ret <vscale x 8 x i32> %y
}

define <vscale x 8 x i32> @vnmsub_vx_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, i32 %c) {
; CHECK-LABEL: vnmsub_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vnmsac.vx v8, a0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %c, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %x = mul <vscale x 8 x i32> %vb, %splat
  %y = sub <vscale x 8 x i32> %va, %x
  ret <vscale x 8 x i32> %y
}

define <vscale x 16 x i32> @vnmsub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i32> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v24, v16
; CHECK-NEXT:    ret
  %x = mul <vscale x 16 x i32> %vc, %va
  %y = sub <vscale x 16 x i32> %vb, %x
  ret <vscale x 16 x i32> %y
}

define <vscale x 16 x i32> @vnmsub_vx_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, i32 %c) {
; CHECK-LABEL: vnmsub_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vnmsub.vx v8, a0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %c, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %x = mul <vscale x 16 x i32> %va, %splat
  %y = sub <vscale x 16 x i32> %vb, %x
  ret <vscale x 16 x i32> %y
}

define <vscale x 1 x i64> @vnmsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i64> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 1 x i64> %va, %vb
  %y = sub <vscale x 1 x i64> %vc, %x
  ret <vscale x 1 x i64> %y
}

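; On RV32 the i64 scalar operand arrives in a GPR pair (a0/a1), so in the RV32
; checks below the splat is materialized by spilling the pair to the stack and
; broadcasting it with a zero-stride vlse64.v, after which the .vv form is used
; instead of .vx; RV64 can feed the scalar directly to vnmsub.vx/vnmsac.vx.
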
define <vscale x 1 x i64> @vnmsub_vx_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i64 %c) {
; RV32-LABEL: vnmsub_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vnmsub.vv v8, v10, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vnmsub_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vnmsub.vx v8, a0, v9
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %c, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %x = mul <vscale x 1 x i64> %va, %splat
  %y = sub <vscale x 1 x i64> %vb, %x
  ret <vscale x 1 x i64> %y
}

define <vscale x 2 x i64> @vnmsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i64> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v12, v10
; CHECK-NEXT:    ret
  %x = mul <vscale x 2 x i64> %va, %vc
  %y = sub <vscale x 2 x i64> %vb, %x
  ret <vscale x 2 x i64> %y
}

define <vscale x 2 x i64> @vnmsub_vx_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i64 %c) {
; RV32-LABEL: vnmsub_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vnmsac.vv v8, v10, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vnmsub_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT:    vnmsac.vx v8, a0, v10
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %c, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %x = mul <vscale x 2 x i64> %vb, %splat
  %y = sub <vscale x 2 x i64> %va, %x
  ret <vscale x 2 x i64> %y
}

define <vscale x 4 x i64> @vnmsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i64> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
  %x = mul <vscale x 4 x i64> %vb, %va
  %y = sub <vscale x 4 x i64> %vc, %x
  ret <vscale x 4 x i64> %y
}

define <vscale x 4 x i64> @vnmsub_vx_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, i64 %c) {
; RV32-LABEL: vnmsub_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vnmsub.vv v8, v16, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vnmsub_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vnmsub.vx v8, a0, v12
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %c, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %x = mul <vscale x 4 x i64> %va, %splat
  %y = sub <vscale x 4 x i64> %vb, %x
  ret <vscale x 4 x i64> %y
}

define <vscale x 8 x i64> @vnmsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i64> %vc) {
; CHECK-LABEL: vnmsub_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vnmsac.vv v8, v16, v24
; CHECK-NEXT:    ret
  %x = mul <vscale x 8 x i64> %vb, %vc
  %y = sub <vscale x 8 x i64> %va, %x
  ret <vscale x 8 x i64> %y
}

define <vscale x 8 x i64> @vnmsub_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, i64 %c) {
; RV32-LABEL: vnmsub_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vnmsac.vv v8, v16, v24
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: vnmsub_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vnmsac.vx v8, a0, v16
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %c, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %x = mul <vscale x 8 x i64> %vb, %splat
  %y = sub <vscale x 8 x i64> %va, %x
  ret <vscale x 8 x i64> %y
}

define <vscale x 4 x i32> @combine_mul_sub_imm1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: combine_mul_sub_imm1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v10
; CHECK-NEXT:    ret
  %x = sub <vscale x 4 x i32> splat (i32 1), %a
  %y = mul <vscale x 4 x i32> %x, %b
  ret <vscale x 4 x i32> %y
}

define <vscale x 4 x i32> @combine_mul_sub_imm1_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: combine_mul_sub_imm1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnmsub.vv v8, v10, v10
; CHECK-NEXT:    ret
  %x = sub <vscale x 4 x i32> splat (i32 1), %a
  %y = mul <vscale x 4 x i32> %b, %x
  ret <vscale x 4 x i32> %y
}