1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
5 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
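; The tests below compute c - (a * b) from @llvm.vp.mul and @llvm.vp.sub under an
; all-ones mask and then route the result through @llvm.vp.merge (checked with a
; tu policy) or @llvm.vp.select (checked with a ta policy). The CHECK lines expect
; this pattern to fold into a single vnmsac.vv / vnmsac.vx whose vsetvli carries
; the matching tail/mask policy.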
7 declare <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
8 declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
9 declare <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
10 declare <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)
12 define <vscale x 1 x i8> @vnmsac_vv_nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
13 ; CHECK-LABEL: vnmsac_vv_nxv1i8:
14 ; CHECK: # %bb.0:
15 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
16 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
17 ; CHECK-NEXT: vmv1r.v v8, v10
18 ; CHECK-NEXT: ret
19 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
20 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
21 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
22 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
23 %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
24 ret <vscale x 1 x i8> %u
25 }
27 define <vscale x 1 x i8> @vnmsac_vv_nxv1i8_unmasked(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
28 ; CHECK-LABEL: vnmsac_vv_nxv1i8_unmasked:
29 ; CHECK: # %bb.0:
30 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
31 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
32 ; CHECK-NEXT: vmv1r.v v8, v10
33 ; CHECK-NEXT: ret
34 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
35 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
36 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
37 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
38 %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
39 ret <vscale x 1 x i8> %u
40 }
42 define <vscale x 1 x i8> @vnmsac_vx_nxv1i8(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
43 ; CHECK-LABEL: vnmsac_vx_nxv1i8:
44 ; CHECK: # %bb.0:
45 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
46 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
47 ; CHECK-NEXT: vmv1r.v v8, v9
48 ; CHECK-NEXT: ret
49 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
50 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
51 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
52 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
53 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
54 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
55 %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
56 ret <vscale x 1 x i8> %u
57 }
59 define <vscale x 1 x i8> @vnmsac_vx_nxv1i8_unmasked(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
60 ; CHECK-LABEL: vnmsac_vx_nxv1i8_unmasked:
61 ; CHECK: # %bb.0:
62 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
63 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
64 ; CHECK-NEXT: vmv1r.v v8, v9
65 ; CHECK-NEXT: ret
66 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
67 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
68 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
69 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
70 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
71 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
72 %u = call <vscale x 1 x i8> @llvm.vp.merge.nxv1i8(<vscale x 1 x i1> %allones, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
73 ret <vscale x 1 x i8> %u
74 }
76 define <vscale x 1 x i8> @vnmsac_vv_nxv1i8_ta(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
77 ; CHECK-LABEL: vnmsac_vv_nxv1i8_ta:
78 ; CHECK: # %bb.0:
79 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
80 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
81 ; CHECK-NEXT: vmv1r.v v8, v10
82 ; CHECK-NEXT: ret
83 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
84 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
85 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %allones, i32 %evl)
86 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
87 %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
88 ret <vscale x 1 x i8> %u
89 }
91 define <vscale x 1 x i8> @vnmsac_vx_nxv1i8_ta(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
92 ; CHECK-LABEL: vnmsac_vx_nxv1i8_ta:
93 ; CHECK: # %bb.0:
94 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
95 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
96 ; CHECK-NEXT: vmv1r.v v8, v9
97 ; CHECK-NEXT: ret
98 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
99 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
100 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
101 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
102 %x = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %allones, i32 %evl)
103 %y = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i8> %x, <vscale x 1 x i1> %allones, i32 %evl)
104 %u = call <vscale x 1 x i8> @llvm.vp.select.nxv1i8(<vscale x 1 x i1> %m, <vscale x 1 x i8> %y, <vscale x 1 x i8> %c, i32 %evl)
105 ret <vscale x 1 x i8> %u
106 }
108 declare <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
109 declare <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
110 declare <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
111 declare <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>, <vscale x 2 x i8>, i32)
113 define <vscale x 2 x i8> @vnmsac_vv_nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
114 ; CHECK-LABEL: vnmsac_vv_nxv2i8:
115 ; CHECK: # %bb.0:
116 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
117 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
118 ; CHECK-NEXT: vmv1r.v v8, v10
119 ; CHECK-NEXT: ret
120 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
121 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
122 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
123 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
124 %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
125 ret <vscale x 2 x i8> %u
126 }
128 define <vscale x 2 x i8> @vnmsac_vv_nxv2i8_unmasked(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
129 ; CHECK-LABEL: vnmsac_vv_nxv2i8_unmasked:
130 ; CHECK: # %bb.0:
131 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
132 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
133 ; CHECK-NEXT: vmv1r.v v8, v10
134 ; CHECK-NEXT: ret
135 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
136 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
137 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
138 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
139 %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
140 ret <vscale x 2 x i8> %u
141 }
143 define <vscale x 2 x i8> @vnmsac_vx_nxv2i8(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
144 ; CHECK-LABEL: vnmsac_vx_nxv2i8:
145 ; CHECK: # %bb.0:
146 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu
147 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
148 ; CHECK-NEXT: vmv1r.v v8, v9
149 ; CHECK-NEXT: ret
150 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
151 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
152 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
153 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
154 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
155 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
156 %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
157 ret <vscale x 2 x i8> %u
158 }
160 define <vscale x 2 x i8> @vnmsac_vx_nxv2i8_unmasked(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
161 ; CHECK-LABEL: vnmsac_vx_nxv2i8_unmasked:
162 ; CHECK: # %bb.0:
163 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
164 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
165 ; CHECK-NEXT: vmv1r.v v8, v9
166 ; CHECK-NEXT: ret
167 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
168 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
169 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
170 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
171 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
172 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
173 %u = call <vscale x 2 x i8> @llvm.vp.merge.nxv2i8(<vscale x 2 x i1> %allones, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
174 ret <vscale x 2 x i8> %u
175 }
177 define <vscale x 2 x i8> @vnmsac_vv_nxv2i8_ta(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
178 ; CHECK-LABEL: vnmsac_vv_nxv2i8_ta:
179 ; CHECK: # %bb.0:
180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
181 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
182 ; CHECK-NEXT: vmv1r.v v8, v10
183 ; CHECK-NEXT: ret
184 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
185 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
186 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %allones, i32 %evl)
187 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
188 %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
189 ret <vscale x 2 x i8> %u
190 }
192 define <vscale x 2 x i8> @vnmsac_vx_nxv2i8_ta(<vscale x 2 x i8> %a, i8 %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
193 ; CHECK-LABEL: vnmsac_vx_nxv2i8_ta:
194 ; CHECK: # %bb.0:
195 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
196 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
197 ; CHECK-NEXT: vmv1r.v v8, v9
198 ; CHECK-NEXT: ret
199 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
200 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
201 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
202 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
203 %x = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %allones, i32 %evl)
204 %y = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %c, <vscale x 2 x i8> %x, <vscale x 2 x i1> %allones, i32 %evl)
205 %u = call <vscale x 2 x i8> @llvm.vp.select.nxv2i8(<vscale x 2 x i1> %m, <vscale x 2 x i8> %y, <vscale x 2 x i8> %c, i32 %evl)
206 ret <vscale x 2 x i8> %u
207 }
209 declare <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
210 declare <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
211 declare <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
212 declare <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32)
214 define <vscale x 4 x i8> @vnmsac_vv_nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
215 ; CHECK-LABEL: vnmsac_vv_nxv4i8:
216 ; CHECK: # %bb.0:
217 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
218 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
219 ; CHECK-NEXT: vmv1r.v v8, v10
220 ; CHECK-NEXT: ret
221 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
222 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
223 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
224 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
225 %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
226 ret <vscale x 4 x i8> %u
227 }
229 define <vscale x 4 x i8> @vnmsac_vv_nxv4i8_unmasked(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
230 ; CHECK-LABEL: vnmsac_vv_nxv4i8_unmasked:
231 ; CHECK: # %bb.0:
232 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
233 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
234 ; CHECK-NEXT: vmv1r.v v8, v10
235 ; CHECK-NEXT: ret
236 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
237 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
238 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
239 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
240 %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
241 ret <vscale x 4 x i8> %u
242 }
244 define <vscale x 4 x i8> @vnmsac_vx_nxv4i8(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
245 ; CHECK-LABEL: vnmsac_vx_nxv4i8:
246 ; CHECK: # %bb.0:
247 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
248 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
249 ; CHECK-NEXT: vmv1r.v v8, v9
250 ; CHECK-NEXT: ret
251 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
252 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
253 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
254 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
255 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
256 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
257 %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
258 ret <vscale x 4 x i8> %u
259 }
261 define <vscale x 4 x i8> @vnmsac_vx_nxv4i8_unmasked(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
262 ; CHECK-LABEL: vnmsac_vx_nxv4i8_unmasked:
263 ; CHECK: # %bb.0:
264 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
265 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
266 ; CHECK-NEXT: vmv1r.v v8, v9
267 ; CHECK-NEXT: ret
268 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
269 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
270 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
271 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
272 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
273 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
274 %u = call <vscale x 4 x i8> @llvm.vp.merge.nxv4i8(<vscale x 4 x i1> %allones, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
275 ret <vscale x 4 x i8> %u
276 }
278 define <vscale x 4 x i8> @vnmsac_vv_nxv4i8_ta(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
279 ; CHECK-LABEL: vnmsac_vv_nxv4i8_ta:
280 ; CHECK: # %bb.0:
281 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
282 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
283 ; CHECK-NEXT: vmv1r.v v8, v10
284 ; CHECK-NEXT: ret
285 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
286 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
287 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %allones, i32 %evl)
288 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
289 %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
290 ret <vscale x 4 x i8> %u
291 }
293 define <vscale x 4 x i8> @vnmsac_vx_nxv4i8_ta(<vscale x 4 x i8> %a, i8 %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
294 ; CHECK-LABEL: vnmsac_vx_nxv4i8_ta:
295 ; CHECK: # %bb.0:
296 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
297 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
298 ; CHECK-NEXT: vmv1r.v v8, v9
299 ; CHECK-NEXT: ret
300 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
301 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
302 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
303 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
304 %x = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %allones, i32 %evl)
305 %y = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %c, <vscale x 4 x i8> %x, <vscale x 4 x i1> %allones, i32 %evl)
306 %u = call <vscale x 4 x i8> @llvm.vp.select.nxv4i8(<vscale x 4 x i1> %m, <vscale x 4 x i8> %y, <vscale x 4 x i8> %c, i32 %evl)
307 ret <vscale x 4 x i8> %u
308 }
310 declare <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
311 declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
312 declare <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
313 declare <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32)
315 define <vscale x 8 x i8> @vnmsac_vv_nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
316 ; CHECK-LABEL: vnmsac_vv_nxv8i8:
317 ; CHECK: # %bb.0:
318 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
319 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
320 ; CHECK-NEXT: vmv1r.v v8, v10
321 ; CHECK-NEXT: ret
322 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
323 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
324 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
325 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
326 %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
327 ret <vscale x 8 x i8> %u
328 }
330 define <vscale x 8 x i8> @vnmsac_vv_nxv8i8_unmasked(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
331 ; CHECK-LABEL: vnmsac_vv_nxv8i8_unmasked:
332 ; CHECK: # %bb.0:
333 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
334 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
335 ; CHECK-NEXT: vmv1r.v v8, v10
336 ; CHECK-NEXT: ret
337 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
338 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
339 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
340 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
341 %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
342 ret <vscale x 8 x i8> %u
343 }
345 define <vscale x 8 x i8> @vnmsac_vx_nxv8i8(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
346 ; CHECK-LABEL: vnmsac_vx_nxv8i8:
347 ; CHECK: # %bb.0:
348 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu
349 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
350 ; CHECK-NEXT: vmv1r.v v8, v9
351 ; CHECK-NEXT: ret
352 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
353 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
354 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
355 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
356 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
357 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
358 %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
359 ret <vscale x 8 x i8> %u
360 }
362 define <vscale x 8 x i8> @vnmsac_vx_nxv8i8_unmasked(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
363 ; CHECK-LABEL: vnmsac_vx_nxv8i8_unmasked:
364 ; CHECK: # %bb.0:
365 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
366 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
367 ; CHECK-NEXT: vmv1r.v v8, v9
368 ; CHECK-NEXT: ret
369 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
370 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
371 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
372 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
373 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
374 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
375 %u = call <vscale x 8 x i8> @llvm.vp.merge.nxv8i8(<vscale x 8 x i1> %allones, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
376 ret <vscale x 8 x i8> %u
377 }
379 define <vscale x 8 x i8> @vnmsac_vv_nxv8i8_ta(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
380 ; CHECK-LABEL: vnmsac_vv_nxv8i8_ta:
381 ; CHECK: # %bb.0:
382 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
383 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
384 ; CHECK-NEXT: vmv.v.v v8, v10
385 ; CHECK-NEXT: ret
386 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
387 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
388 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %allones, i32 %evl)
389 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
390 %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
391 ret <vscale x 8 x i8> %u
392 }
394 define <vscale x 8 x i8> @vnmsac_vx_nxv8i8_ta(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
395 ; CHECK-LABEL: vnmsac_vx_nxv8i8_ta:
396 ; CHECK: # %bb.0:
397 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
398 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
399 ; CHECK-NEXT: vmv.v.v v8, v9
400 ; CHECK-NEXT: ret
401 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
402 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
403 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
404 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
405 %x = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %allones, i32 %evl)
406 %y = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %c, <vscale x 8 x i8> %x, <vscale x 8 x i1> %allones, i32 %evl)
407 %u = call <vscale x 8 x i8> @llvm.vp.select.nxv8i8(<vscale x 8 x i1> %m, <vscale x 8 x i8> %y, <vscale x 8 x i8> %c, i32 %evl)
408 ret <vscale x 8 x i8> %u
409 }
411 declare <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
412 declare <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
413 declare <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
414 declare <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
416 define <vscale x 16 x i8> @vnmsac_vv_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
417 ; CHECK-LABEL: vnmsac_vv_nxv16i8:
418 ; CHECK: # %bb.0:
419 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
420 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
421 ; CHECK-NEXT: vmv2r.v v8, v12
422 ; CHECK-NEXT: ret
423 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
424 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
425 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
426 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
427 %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
428 ret <vscale x 16 x i8> %u
429 }
431 define <vscale x 16 x i8> @vnmsac_vv_nxv16i8_unmasked(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
432 ; CHECK-LABEL: vnmsac_vv_nxv16i8_unmasked:
433 ; CHECK: # %bb.0:
434 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
435 ; CHECK-NEXT: vnmsac.vv v12, v8, v10
436 ; CHECK-NEXT: vmv2r.v v8, v12
437 ; CHECK-NEXT: ret
438 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
439 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
440 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
441 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
442 %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
443 ret <vscale x 16 x i8> %u
444 }
446 define <vscale x 16 x i8> @vnmsac_vx_nxv16i8(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
447 ; CHECK-LABEL: vnmsac_vx_nxv16i8:
448 ; CHECK: # %bb.0:
449 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, mu
450 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
451 ; CHECK-NEXT: vmv2r.v v8, v10
452 ; CHECK-NEXT: ret
453 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
454 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
455 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
456 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
457 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
458 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
459 %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
460 ret <vscale x 16 x i8> %u
461 }
463 define <vscale x 16 x i8> @vnmsac_vx_nxv16i8_unmasked(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
464 ; CHECK-LABEL: vnmsac_vx_nxv16i8_unmasked:
465 ; CHECK: # %bb.0:
466 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
467 ; CHECK-NEXT: vnmsac.vx v10, a0, v8
468 ; CHECK-NEXT: vmv2r.v v8, v10
469 ; CHECK-NEXT: ret
470 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
471 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
472 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
473 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
474 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
475 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
476 %u = call <vscale x 16 x i8> @llvm.vp.merge.nxv16i8(<vscale x 16 x i1> %allones, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
477 ret <vscale x 16 x i8> %u
478 }
480 define <vscale x 16 x i8> @vnmsac_vv_nxv16i8_ta(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
481 ; CHECK-LABEL: vnmsac_vv_nxv16i8_ta:
482 ; CHECK: # %bb.0:
483 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
484 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
485 ; CHECK-NEXT: vmv.v.v v8, v12
486 ; CHECK-NEXT: ret
487 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
488 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
489 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %allones, i32 %evl)
490 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
491 %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
492 ret <vscale x 16 x i8> %u
493 }
495 define <vscale x 16 x i8> @vnmsac_vx_nxv16i8_ta(<vscale x 16 x i8> %a, i8 %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
496 ; CHECK-LABEL: vnmsac_vx_nxv16i8_ta:
497 ; CHECK: # %bb.0:
498 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
499 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
500 ; CHECK-NEXT: vmv.v.v v8, v10
501 ; CHECK-NEXT: ret
502 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
503 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
504 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
505 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
506 %x = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %allones, i32 %evl)
507 %y = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %x, <vscale x 16 x i1> %allones, i32 %evl)
508 %u = call <vscale x 16 x i8> @llvm.vp.select.nxv16i8(<vscale x 16 x i1> %m, <vscale x 16 x i8> %y, <vscale x 16 x i8> %c, i32 %evl)
509 ret <vscale x 16 x i8> %u
510 }
512 declare <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
513 declare <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
514 declare <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32)
515 declare <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1>, <vscale x 32 x i8>, <vscale x 32 x i8>, i32)
517 define <vscale x 32 x i8> @vnmsac_vv_nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
518 ; CHECK-LABEL: vnmsac_vv_nxv32i8:
519 ; CHECK: # %bb.0:
520 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
521 ; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
522 ; CHECK-NEXT: vmv4r.v v8, v16
523 ; CHECK-NEXT: ret
524 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
525 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
526 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
527 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
528 %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
529 ret <vscale x 32 x i8> %u
530 }
532 define <vscale x 32 x i8> @vnmsac_vv_nxv32i8_unmasked(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
533 ; CHECK-LABEL: vnmsac_vv_nxv32i8_unmasked:
534 ; CHECK: # %bb.0:
535 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
536 ; CHECK-NEXT: vnmsac.vv v16, v8, v12
537 ; CHECK-NEXT: vmv4r.v v8, v16
538 ; CHECK-NEXT: ret
539 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
540 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
541 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
542 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
543 %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
544 ret <vscale x 32 x i8> %u
545 }
547 define <vscale x 32 x i8> @vnmsac_vx_nxv32i8(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
548 ; CHECK-LABEL: vnmsac_vx_nxv32i8:
549 ; CHECK: # %bb.0:
550 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, mu
551 ; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
552 ; CHECK-NEXT: vmv4r.v v8, v12
553 ; CHECK-NEXT: ret
554 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
555 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
556 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
557 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
558 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
559 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
560 %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
561 ret <vscale x 32 x i8> %u
562 }
564 define <vscale x 32 x i8> @vnmsac_vx_nxv32i8_unmasked(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
565 ; CHECK-LABEL: vnmsac_vx_nxv32i8_unmasked:
566 ; CHECK: # %bb.0:
567 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
568 ; CHECK-NEXT: vnmsac.vx v12, a0, v8
569 ; CHECK-NEXT: vmv4r.v v8, v12
570 ; CHECK-NEXT: ret
571 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
572 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
573 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
574 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
575 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
576 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
577 %u = call <vscale x 32 x i8> @llvm.vp.merge.nxv32i8(<vscale x 32 x i1> %allones, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
578 ret <vscale x 32 x i8> %u
579 }
581 define <vscale x 32 x i8> @vnmsac_vv_nxv32i8_ta(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
582 ; CHECK-LABEL: vnmsac_vv_nxv32i8_ta:
583 ; CHECK: # %bb.0:
584 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
585 ; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
586 ; CHECK-NEXT: vmv.v.v v8, v16
587 ; CHECK-NEXT: ret
588 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
589 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
590 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %allones, i32 %evl)
591 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
592 %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
593 ret <vscale x 32 x i8> %u
594 }
596 define <vscale x 32 x i8> @vnmsac_vx_nxv32i8_ta(<vscale x 32 x i8> %a, i8 %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
597 ; CHECK-LABEL: vnmsac_vx_nxv32i8_ta:
598 ; CHECK: # %bb.0:
599 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
600 ; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
601 ; CHECK-NEXT: vmv.v.v v8, v12
602 ; CHECK-NEXT: ret
603 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
604 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
605 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
606 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
607 %x = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %allones, i32 %evl)
608 %y = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %c, <vscale x 32 x i8> %x, <vscale x 32 x i1> %allones, i32 %evl)
609 %u = call <vscale x 32 x i8> @llvm.vp.select.nxv32i8(<vscale x 32 x i1> %m, <vscale x 32 x i8> %y, <vscale x 32 x i8> %c, i32 %evl)
610 ret <vscale x 32 x i8> %u
611 }
613 declare <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
614 declare <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
615 declare <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32)
616 declare <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1>, <vscale x 64 x i8>, <vscale x 64 x i8>, i32)
618 define <vscale x 64 x i8> @vnmsac_vv_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
619 ; CHECK-LABEL: vnmsac_vv_nxv64i8:
620 ; CHECK: # %bb.0:
621 ; CHECK-NEXT: vl8r.v v24, (a0)
622 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu
623 ; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
624 ; CHECK-NEXT: vmv8r.v v8, v24
625 ; CHECK-NEXT: ret
626 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
627 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
628 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
629 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
630 %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
631 ret <vscale x 64 x i8> %u
632 }
634 define <vscale x 64 x i8> @vnmsac_vv_nxv64i8_unmasked(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
635 ; CHECK-LABEL: vnmsac_vv_nxv64i8_unmasked:
636 ; CHECK: # %bb.0:
637 ; CHECK-NEXT: vl8r.v v24, (a0)
638 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
639 ; CHECK-NEXT: vnmsac.vv v24, v8, v16
640 ; CHECK-NEXT: vmv8r.v v8, v24
641 ; CHECK-NEXT: ret
642 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
643 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
644 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
645 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
646 %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
647 ret <vscale x 64 x i8> %u
648 }
650 define <vscale x 64 x i8> @vnmsac_vx_nxv64i8(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
651 ; CHECK-LABEL: vnmsac_vx_nxv64i8:
652 ; CHECK: # %bb.0:
653 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, mu
654 ; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
655 ; CHECK-NEXT: vmv8r.v v8, v16
656 ; CHECK-NEXT: ret
657 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
658 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
659 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
660 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
661 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
662 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
663 %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
664 ret <vscale x 64 x i8> %u
665 }
667 define <vscale x 64 x i8> @vnmsac_vx_nxv64i8_unmasked(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
668 ; CHECK-LABEL: vnmsac_vx_nxv64i8_unmasked:
669 ; CHECK: # %bb.0:
670 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
671 ; CHECK-NEXT: vnmsac.vx v16, a0, v8
672 ; CHECK-NEXT: vmv8r.v v8, v16
673 ; CHECK-NEXT: ret
674 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
675 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
676 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
677 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
678 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
679 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
680 %u = call <vscale x 64 x i8> @llvm.vp.merge.nxv64i8(<vscale x 64 x i1> %allones, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
681 ret <vscale x 64 x i8> %u
682 }
684 define <vscale x 64 x i8> @vnmsac_vv_nxv64i8_ta(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
685 ; CHECK-LABEL: vnmsac_vv_nxv64i8_ta:
686 ; CHECK: # %bb.0:
687 ; CHECK-NEXT: vl8r.v v24, (a0)
688 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
689 ; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
690 ; CHECK-NEXT: vmv.v.v v8, v24
691 ; CHECK-NEXT: ret
692 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
693 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
694 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %allones, i32 %evl)
695 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
696 %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
697 ret <vscale x 64 x i8> %u
698 }
700 define <vscale x 64 x i8> @vnmsac_vx_nxv64i8_ta(<vscale x 64 x i8> %a, i8 %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
701 ; CHECK-LABEL: vnmsac_vx_nxv64i8_ta:
702 ; CHECK: # %bb.0:
703 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
704 ; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
705 ; CHECK-NEXT: vmv.v.v v8, v16
706 ; CHECK-NEXT: ret
707 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
708 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
709 %splat = insertelement <vscale x 64 x i1> poison, i1 -1, i32 0
710 %allones = shufflevector <vscale x 64 x i1> %splat, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
711 %x = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %allones, i32 %evl)
712 %y = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %c, <vscale x 64 x i8> %x, <vscale x 64 x i1> %allones, i32 %evl)
713 %u = call <vscale x 64 x i8> @llvm.vp.select.nxv64i8(<vscale x 64 x i1> %m, <vscale x 64 x i8> %y, <vscale x 64 x i8> %c, i32 %evl)
714 ret <vscale x 64 x i8> %u
715 }
717 declare <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
718 declare <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
719 declare <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32)
720 declare <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32)
722 define <vscale x 1 x i16> @vnmsac_vv_nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
723 ; CHECK-LABEL: vnmsac_vv_nxv1i16:
724 ; CHECK: # %bb.0:
725 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
726 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
727 ; CHECK-NEXT: vmv1r.v v8, v10
728 ; CHECK-NEXT: ret
729 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
730 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
731 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
732 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
733 %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
734 ret <vscale x 1 x i16> %u
735 }
737 define <vscale x 1 x i16> @vnmsac_vv_nxv1i16_unmasked(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
738 ; CHECK-LABEL: vnmsac_vv_nxv1i16_unmasked:
739 ; CHECK: # %bb.0:
740 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
741 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
742 ; CHECK-NEXT: vmv1r.v v8, v10
743 ; CHECK-NEXT: ret
744 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
745 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
746 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
747 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
748 %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
749 ret <vscale x 1 x i16> %u
750 }
752 define <vscale x 1 x i16> @vnmsac_vx_nxv1i16(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
753 ; CHECK-LABEL: vnmsac_vx_nxv1i16:
754 ; CHECK: # %bb.0:
755 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu
756 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
757 ; CHECK-NEXT: vmv1r.v v8, v9
758 ; CHECK-NEXT: ret
759 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
760 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
761 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
762 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
763 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
764 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
765 %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
766 ret <vscale x 1 x i16> %u
767 }
769 define <vscale x 1 x i16> @vnmsac_vx_nxv1i16_unmasked(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
770 ; CHECK-LABEL: vnmsac_vx_nxv1i16_unmasked:
771 ; CHECK: # %bb.0:
772 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
773 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
774 ; CHECK-NEXT: vmv1r.v v8, v9
775 ; CHECK-NEXT: ret
776 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
777 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
778 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
779 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
780 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
781 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
782 %u = call <vscale x 1 x i16> @llvm.vp.merge.nxv1i16(<vscale x 1 x i1> %allones, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
783 ret <vscale x 1 x i16> %u
784 }
786 define <vscale x 1 x i16> @vnmsac_vv_nxv1i16_ta(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
787 ; CHECK-LABEL: vnmsac_vv_nxv1i16_ta:
788 ; CHECK: # %bb.0:
789 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
790 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
791 ; CHECK-NEXT: vmv1r.v v8, v10
792 ; CHECK-NEXT: ret
793 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
794 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
795 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %allones, i32 %evl)
796 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
797 %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
798 ret <vscale x 1 x i16> %u
799 }
801 define <vscale x 1 x i16> @vnmsac_vx_nxv1i16_ta(<vscale x 1 x i16> %a, i16 %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
802 ; CHECK-LABEL: vnmsac_vx_nxv1i16_ta:
803 ; CHECK: # %bb.0:
804 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
805 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
806 ; CHECK-NEXT: vmv1r.v v8, v9
807 ; CHECK-NEXT: ret
808 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
809 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
810 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
811 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
812 %x = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %allones, i32 %evl)
813 %y = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %c, <vscale x 1 x i16> %x, <vscale x 1 x i1> %allones, i32 %evl)
814 %u = call <vscale x 1 x i16> @llvm.vp.select.nxv1i16(<vscale x 1 x i1> %m, <vscale x 1 x i16> %y, <vscale x 1 x i16> %c, i32 %evl)
815 ret <vscale x 1 x i16> %u
816 }
818 declare <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
819 declare <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
820 declare <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
821 declare <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1>, <vscale x 2 x i16>, <vscale x 2 x i16>, i32)
823 define <vscale x 2 x i16> @vnmsac_vv_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
824 ; CHECK-LABEL: vnmsac_vv_nxv2i16:
825 ; CHECK: # %bb.0:
826 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
827 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
828 ; CHECK-NEXT: vmv1r.v v8, v10
829 ; CHECK-NEXT: ret
830 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
831 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
832 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
833 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
834 %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
835 ret <vscale x 2 x i16> %u
836 }
838 define <vscale x 2 x i16> @vnmsac_vv_nxv2i16_unmasked(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
839 ; CHECK-LABEL: vnmsac_vv_nxv2i16_unmasked:
840 ; CHECK: # %bb.0:
841 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
842 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
843 ; CHECK-NEXT: vmv1r.v v8, v10
844 ; CHECK-NEXT: ret
845 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
846 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
847 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
848 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
849 %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
850 ret <vscale x 2 x i16> %u
851 }
853 define <vscale x 2 x i16> @vnmsac_vx_nxv2i16(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
854 ; CHECK-LABEL: vnmsac_vx_nxv2i16:
855 ; CHECK: # %bb.0:
856 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu
857 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
858 ; CHECK-NEXT: vmv1r.v v8, v9
859 ; CHECK-NEXT: ret
860 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
861 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
862 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
863 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
864 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
865 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
866 %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
867 ret <vscale x 2 x i16> %u
870 define <vscale x 2 x i16> @vnmsac_vx_nxv2i16_unmasked(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
871 ; CHECK-LABEL: vnmsac_vx_nxv2i16_unmasked:
873 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
874 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
875 ; CHECK-NEXT: vmv1r.v v8, v9
877 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
878 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
879 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
880 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
881 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
882 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
883 %u = call <vscale x 2 x i16> @llvm.vp.merge.nxv2i16(<vscale x 2 x i1> %allones, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
884 ret <vscale x 2 x i16> %u
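; The *_ta variants use vp.select instead of vp.merge.  vp.select leaves lanes
; at or beyond %evl undefined, so a tail-agnostic "ta" policy is expected; the
; copy out of the accumulator shows up as vmv.v.v (or as a whole-register move
; at fractional LMUL).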
887 define <vscale x 2 x i16> @vnmsac_vv_nxv2i16_ta(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
888 ; CHECK-LABEL: vnmsac_vv_nxv2i16_ta:
890 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
891 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
892 ; CHECK-NEXT: vmv1r.v v8, v10
894 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
895 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
896 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %allones, i32 %evl)
897 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
898 %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
899 ret <vscale x 2 x i16> %u
902 define <vscale x 2 x i16> @vnmsac_vx_nxv2i16_ta(<vscale x 2 x i16> %a, i16 %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
903 ; CHECK-LABEL: vnmsac_vx_nxv2i16_ta:
905 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
906 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
907 ; CHECK-NEXT: vmv1r.v v8, v9
909 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
910 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
911 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
912 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
913 %x = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %allones, i32 %evl)
914 %y = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %c, <vscale x 2 x i16> %x, <vscale x 2 x i1> %allones, i32 %evl)
915 %u = call <vscale x 2 x i16> @llvm.vp.select.nxv2i16(<vscale x 2 x i1> %m, <vscale x 2 x i16> %y, <vscale x 2 x i16> %c, i32 %evl)
916 ret <vscale x 2 x i16> %u
919 declare <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
920 declare <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
921 declare <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32)
922 declare <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1>, <vscale x 4 x i16>, <vscale x 4 x i16>, i32)
924 define <vscale x 4 x i16> @vnmsac_vv_nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
925 ; CHECK-LABEL: vnmsac_vv_nxv4i16:
927 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
928 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
929 ; CHECK-NEXT: vmv1r.v v8, v10
931 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
932 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
933 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
934 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
935 %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
936 ret <vscale x 4 x i16> %u
939 define <vscale x 4 x i16> @vnmsac_vv_nxv4i16_unmasked(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
940 ; CHECK-LABEL: vnmsac_vv_nxv4i16_unmasked:
942 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
943 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
944 ; CHECK-NEXT: vmv1r.v v8, v10
946 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
947 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
948 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
949 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
950 %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
951 ret <vscale x 4 x i16> %u
954 define <vscale x 4 x i16> @vnmsac_vx_nxv4i16(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
955 ; CHECK-LABEL: vnmsac_vx_nxv4i16:
957 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu
958 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
959 ; CHECK-NEXT: vmv1r.v v8, v9
961 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
962 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
963 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
964 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
965 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
966 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
967 %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
968 ret <vscale x 4 x i16> %u
971 define <vscale x 4 x i16> @vnmsac_vx_nxv4i16_unmasked(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
972 ; CHECK-LABEL: vnmsac_vx_nxv4i16_unmasked:
974 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
975 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
976 ; CHECK-NEXT: vmv1r.v v8, v9
978 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
979 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
980 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
981 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
982 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
983 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
984 %u = call <vscale x 4 x i16> @llvm.vp.merge.nxv4i16(<vscale x 4 x i1> %allones, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
985 ret <vscale x 4 x i16> %u
988 define <vscale x 4 x i16> @vnmsac_vv_nxv4i16_ta(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
989 ; CHECK-LABEL: vnmsac_vv_nxv4i16_ta:
991 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
992 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
993 ; CHECK-NEXT: vmv.v.v v8, v10
995 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
996 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
997 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %allones, i32 %evl)
998 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
999 %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
1000 ret <vscale x 4 x i16> %u
1003 define <vscale x 4 x i16> @vnmsac_vx_nxv4i16_ta(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1004 ; CHECK-LABEL: vnmsac_vx_nxv4i16_ta:
1006 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
1007 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
1008 ; CHECK-NEXT: vmv.v.v v8, v9
1010 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
1011 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
1012 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1013 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1014 %x = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %allones, i32 %evl)
1015 %y = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %c, <vscale x 4 x i16> %x, <vscale x 4 x i1> %allones, i32 %evl)
1016 %u = call <vscale x 4 x i16> @llvm.vp.select.nxv4i16(<vscale x 4 x i1> %m, <vscale x 4 x i16> %y, <vscale x 4 x i16> %c, i32 %evl)
1017 ret <vscale x 4 x i16> %u
1020 declare <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
1021 declare <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
1022 declare <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
1023 declare <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
1025 define <vscale x 8 x i16> @vnmsac_vv_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1026 ; CHECK-LABEL: vnmsac_vv_nxv8i16:
1028 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
1029 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
1030 ; CHECK-NEXT: vmv2r.v v8, v12
1032 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1033 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1034 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
1035 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1036 %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1037 ret <vscale x 8 x i16> %u
1040 define <vscale x 8 x i16> @vnmsac_vv_nxv8i16_unmasked(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1041 ; CHECK-LABEL: vnmsac_vv_nxv8i16_unmasked:
1043 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1044 ; CHECK-NEXT: vnmsac.vv v12, v8, v10
1045 ; CHECK-NEXT: vmv2r.v v8, v12
1047 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1048 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1049 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
1050 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1051 %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1052 ret <vscale x 8 x i16> %u
1055 define <vscale x 8 x i16> @vnmsac_vx_nxv8i16(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1056 ; CHECK-LABEL: vnmsac_vx_nxv8i16:
1058 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu
1059 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
1060 ; CHECK-NEXT: vmv2r.v v8, v10
1062 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1063 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1064 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1065 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1066 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
1067 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1068 %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1069 ret <vscale x 8 x i16> %u
1072 define <vscale x 8 x i16> @vnmsac_vx_nxv8i16_unmasked(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1073 ; CHECK-LABEL: vnmsac_vx_nxv8i16_unmasked:
1075 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
1076 ; CHECK-NEXT: vnmsac.vx v10, a0, v8
1077 ; CHECK-NEXT: vmv2r.v v8, v10
1079 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1080 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1081 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1082 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1083 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
1084 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1085 %u = call <vscale x 8 x i16> @llvm.vp.merge.nxv8i16(<vscale x 8 x i1> %allones, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1086 ret <vscale x 8 x i16> %u
1089 define <vscale x 8 x i16> @vnmsac_vv_nxv8i16_ta(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1090 ; CHECK-LABEL: vnmsac_vv_nxv8i16_ta:
1092 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
1093 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
1094 ; CHECK-NEXT: vmv.v.v v8, v12
1096 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1097 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1098 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %allones, i32 %evl)
1099 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1100 %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1101 ret <vscale x 8 x i16> %u
1104 define <vscale x 8 x i16> @vnmsac_vx_nxv8i16_ta(<vscale x 8 x i16> %a, i16 %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1105 ; CHECK-LABEL: vnmsac_vx_nxv8i16_ta:
1107 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1108 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
1109 ; CHECK-NEXT: vmv.v.v v8, v10
1111 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1112 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1113 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1114 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1115 %x = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %allones, i32 %evl)
1116 %y = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %x, <vscale x 8 x i1> %allones, i32 %evl)
1117 %u = call <vscale x 8 x i16> @llvm.vp.select.nxv8i16(<vscale x 8 x i1> %m, <vscale x 8 x i16> %y, <vscale x 8 x i16> %c, i32 %evl)
1118 ret <vscale x 8 x i16> %u
1121 declare <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
1122 declare <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
1123 declare <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32)
1124 declare <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1>, <vscale x 16 x i16>, <vscale x 16 x i16>, i32)
1126 define <vscale x 16 x i16> @vnmsac_vv_nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1127 ; CHECK-LABEL: vnmsac_vv_nxv16i16:
1129 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
1130 ; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
1131 ; CHECK-NEXT: vmv4r.v v8, v16
1133 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1134 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1135 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
1136 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1137 %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1138 ret <vscale x 16 x i16> %u
1141 define <vscale x 16 x i16> @vnmsac_vv_nxv16i16_unmasked(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1142 ; CHECK-LABEL: vnmsac_vv_nxv16i16_unmasked:
1144 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1145 ; CHECK-NEXT: vnmsac.vv v16, v8, v12
1146 ; CHECK-NEXT: vmv4r.v v8, v16
1148 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1149 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1150 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
1151 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1152 %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1153 ret <vscale x 16 x i16> %u
1156 define <vscale x 16 x i16> @vnmsac_vx_nxv16i16(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1157 ; CHECK-LABEL: vnmsac_vx_nxv16i16:
1159 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, mu
1160 ; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
1161 ; CHECK-NEXT: vmv4r.v v8, v12
1163 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
1164 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
1165 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1166 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1167 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
1168 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1169 %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1170 ret <vscale x 16 x i16> %u
1173 define <vscale x 16 x i16> @vnmsac_vx_nxv16i16_unmasked(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1174 ; CHECK-LABEL: vnmsac_vx_nxv16i16_unmasked:
1176 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
1177 ; CHECK-NEXT: vnmsac.vx v12, a0, v8
1178 ; CHECK-NEXT: vmv4r.v v8, v12
1180 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
1181 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
1182 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1183 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1184 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
1185 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1186 %u = call <vscale x 16 x i16> @llvm.vp.merge.nxv16i16(<vscale x 16 x i1> %allones, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1187 ret <vscale x 16 x i16> %u
1190 define <vscale x 16 x i16> @vnmsac_vv_nxv16i16_ta(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1191 ; CHECK-LABEL: vnmsac_vv_nxv16i16_ta:
1193 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
1194 ; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
1195 ; CHECK-NEXT: vmv.v.v v8, v16
1197 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1198 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1199 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %allones, i32 %evl)
1200 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1201 %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1202 ret <vscale x 16 x i16> %u
1205 define <vscale x 16 x i16> @vnmsac_vx_nxv16i16_ta(<vscale x 16 x i16> %a, i16 %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1206 ; CHECK-LABEL: vnmsac_vx_nxv16i16_ta:
1208 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
1209 ; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
1210 ; CHECK-NEXT: vmv.v.v v8, v12
1212 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
1213 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
1214 %splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
1215 %allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
1216 %x = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %allones, i32 %evl)
1217 %y = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %c, <vscale x 16 x i16> %x, <vscale x 16 x i1> %allones, i32 %evl)
1218 %u = call <vscale x 16 x i16> @llvm.vp.select.nxv16i16(<vscale x 16 x i1> %m, <vscale x 16 x i16> %y, <vscale x 16 x i16> %c, i32 %evl)
1219 ret <vscale x 16 x i16> %u
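; At LMUL=8 the first two vector arguments already occupy v8-v23, so %c is
; passed indirectly and the checks start by reloading it (vl8re16.v from a0),
; with %evl moving to a1.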
1222 declare <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
1223 declare <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
1224 declare <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32)
1225 declare <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1>, <vscale x 32 x i16>, <vscale x 32 x i16>, i32)
1227 define <vscale x 32 x i16> @vnmsac_vv_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1228 ; CHECK-LABEL: vnmsac_vv_nxv32i16:
1230 ; CHECK-NEXT: vl8re16.v v24, (a0)
1231 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
1232 ; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
1233 ; CHECK-NEXT: vmv8r.v v8, v24
1235 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1236 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1237 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
1238 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1239 %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1240 ret <vscale x 32 x i16> %u
1243 define <vscale x 32 x i16> @vnmsac_vv_nxv32i16_unmasked(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1244 ; CHECK-LABEL: vnmsac_vv_nxv32i16_unmasked:
1246 ; CHECK-NEXT: vl8re16.v v24, (a0)
1247 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma
1248 ; CHECK-NEXT: vnmsac.vv v24, v8, v16
1249 ; CHECK-NEXT: vmv8r.v v8, v24
1251 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1252 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1253 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
1254 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1255 %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1256 ret <vscale x 32 x i16> %u
1259 define <vscale x 32 x i16> @vnmsac_vx_nxv32i16(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1260 ; CHECK-LABEL: vnmsac_vx_nxv32i16:
1262 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, mu
1263 ; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
1264 ; CHECK-NEXT: vmv8r.v v8, v16
1266 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
1267 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
1268 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1269 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1270 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
1271 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1272 %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1273 ret <vscale x 32 x i16> %u
1276 define <vscale x 32 x i16> @vnmsac_vx_nxv32i16_unmasked(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1277 ; CHECK-LABEL: vnmsac_vx_nxv32i16_unmasked:
1279 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, tu, ma
1280 ; CHECK-NEXT: vnmsac.vx v16, a0, v8
1281 ; CHECK-NEXT: vmv8r.v v8, v16
1283 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
1284 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
1285 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1286 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1287 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
1288 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1289 %u = call <vscale x 32 x i16> @llvm.vp.merge.nxv32i16(<vscale x 32 x i1> %allones, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1290 ret <vscale x 32 x i16> %u
1293 define <vscale x 32 x i16> @vnmsac_vv_nxv32i16_ta(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1294 ; CHECK-LABEL: vnmsac_vv_nxv32i16_ta:
1296 ; CHECK-NEXT: vl8re16.v v24, (a0)
1297 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1298 ; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
1299 ; CHECK-NEXT: vmv.v.v v8, v24
1301 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1302 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1303 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %allones, i32 %evl)
1304 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1305 %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1306 ret <vscale x 32 x i16> %u
1309 define <vscale x 32 x i16> @vnmsac_vx_nxv32i16_ta(<vscale x 32 x i16> %a, i16 %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
1310 ; CHECK-LABEL: vnmsac_vx_nxv32i16_ta:
1312 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
1313 ; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
1314 ; CHECK-NEXT: vmv.v.v v8, v16
1316 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
1317 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
1318 %splat = insertelement <vscale x 32 x i1> poison, i1 -1, i32 0
1319 %allones = shufflevector <vscale x 32 x i1> %splat, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
1320 %x = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %allones, i32 %evl)
1321 %y = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %c, <vscale x 32 x i16> %x, <vscale x 32 x i1> %allones, i32 %evl)
1322 %u = call <vscale x 32 x i16> @llvm.vp.select.nxv32i16(<vscale x 32 x i1> %m, <vscale x 32 x i16> %y, <vscale x 32 x i16> %c, i32 %evl)
1323 ret <vscale x 32 x i16> %u
1326 declare <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
1327 declare <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
1328 declare <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32)
1329 declare <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32)
1331 define <vscale x 1 x i32> @vnmsac_vv_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1332 ; CHECK-LABEL: vnmsac_vv_nxv1i32:
1334 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu
1335 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
1336 ; CHECK-NEXT: vmv1r.v v8, v10
1338 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1339 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1340 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
1341 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1342 %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1343 ret <vscale x 1 x i32> %u
1346 define <vscale x 1 x i32> @vnmsac_vv_nxv1i32_unmasked(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1347 ; CHECK-LABEL: vnmsac_vv_nxv1i32_unmasked:
1349 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1350 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
1351 ; CHECK-NEXT: vmv1r.v v8, v10
1353 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1354 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1355 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
1356 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1357 %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1358 ret <vscale x 1 x i32> %u
1361 define <vscale x 1 x i32> @vnmsac_vx_nxv1i32(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1362 ; CHECK-LABEL: vnmsac_vx_nxv1i32:
1364 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu
1365 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
1366 ; CHECK-NEXT: vmv1r.v v8, v9
1368 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
1369 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
1370 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1371 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1372 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
1373 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1374 %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1375 ret <vscale x 1 x i32> %u
1378 define <vscale x 1 x i32> @vnmsac_vx_nxv1i32_unmasked(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1379 ; CHECK-LABEL: vnmsac_vx_nxv1i32_unmasked:
1381 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
1382 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
1383 ; CHECK-NEXT: vmv1r.v v8, v9
1385 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
1386 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
1387 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1388 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1389 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
1390 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1391 %u = call <vscale x 1 x i32> @llvm.vp.merge.nxv1i32(<vscale x 1 x i1> %allones, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1392 ret <vscale x 1 x i32> %u
1395 define <vscale x 1 x i32> @vnmsac_vv_nxv1i32_ta(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1396 ; CHECK-LABEL: vnmsac_vv_nxv1i32_ta:
1398 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
1399 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
1400 ; CHECK-NEXT: vmv1r.v v8, v10
1402 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1403 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1404 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i1> %allones, i32 %evl)
1405 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1406 %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1407 ret <vscale x 1 x i32> %u
1410 define <vscale x 1 x i32> @vnmsac_vx_nxv1i32_ta(<vscale x 1 x i32> %a, i32 %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
1411 ; CHECK-LABEL: vnmsac_vx_nxv1i32_ta:
1413 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1414 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
1415 ; CHECK-NEXT: vmv1r.v v8, v9
1417 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
1418 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
1419 %splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
1420 %allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
1421 %x = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %allones, i32 %evl)
1422 %y = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %c, <vscale x 1 x i32> %x, <vscale x 1 x i1> %allones, i32 %evl)
1423 %u = call <vscale x 1 x i32> @llvm.vp.select.nxv1i32(<vscale x 1 x i1> %m, <vscale x 1 x i32> %y, <vscale x 1 x i32> %c, i32 %evl)
1424 ret <vscale x 1 x i32> %u
1427 declare <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
1428 declare <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
1429 declare <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
1430 declare <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32)
1432 define <vscale x 2 x i32> @vnmsac_vv_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1433 ; CHECK-LABEL: vnmsac_vv_nxv2i32:
1435 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
1436 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
1437 ; CHECK-NEXT: vmv1r.v v8, v10
1439 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1440 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1441 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
1442 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1443 %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1444 ret <vscale x 2 x i32> %u
1447 define <vscale x 2 x i32> @vnmsac_vv_nxv2i32_unmasked(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1448 ; CHECK-LABEL: vnmsac_vv_nxv2i32_unmasked:
1450 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1451 ; CHECK-NEXT: vnmsac.vv v10, v8, v9
1452 ; CHECK-NEXT: vmv1r.v v8, v10
1454 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1455 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1456 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
1457 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1458 %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1459 ret <vscale x 2 x i32> %u
1462 define <vscale x 2 x i32> @vnmsac_vx_nxv2i32(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1463 ; CHECK-LABEL: vnmsac_vx_nxv2i32:
1465 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
1466 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
1467 ; CHECK-NEXT: vmv1r.v v8, v9
1469 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1470 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1471 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1472 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1473 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
1474 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1475 %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1476 ret <vscale x 2 x i32> %u
1479 define <vscale x 2 x i32> @vnmsac_vx_nxv2i32_unmasked(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1480 ; CHECK-LABEL: vnmsac_vx_nxv2i32_unmasked:
1482 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
1483 ; CHECK-NEXT: vnmsac.vx v9, a0, v8
1484 ; CHECK-NEXT: vmv1r.v v8, v9
1486 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1487 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1488 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1489 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1490 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
1491 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1492 %u = call <vscale x 2 x i32> @llvm.vp.merge.nxv2i32(<vscale x 2 x i1> %allones, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1493 ret <vscale x 2 x i32> %u
1496 define <vscale x 2 x i32> @vnmsac_vv_nxv2i32_ta(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1497 ; CHECK-LABEL: vnmsac_vv_nxv2i32_ta:
1499 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
1500 ; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
1501 ; CHECK-NEXT: vmv.v.v v8, v10
1503 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1504 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1505 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %allones, i32 %evl)
1506 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1507 %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1508 ret <vscale x 2 x i32> %u
1511 define <vscale x 2 x i32> @vnmsac_vx_nxv2i32_ta(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
1512 ; CHECK-LABEL: vnmsac_vx_nxv2i32_ta:
1514 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
1515 ; CHECK-NEXT: vnmsac.vx v9, a0, v8, v0.t
1516 ; CHECK-NEXT: vmv.v.v v8, v9
1518 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
1519 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
1520 %splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
1521 %allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
1522 %x = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %allones, i32 %evl)
1523 %y = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %c, <vscale x 2 x i32> %x, <vscale x 2 x i1> %allones, i32 %evl)
1524 %u = call <vscale x 2 x i32> @llvm.vp.select.nxv2i32(<vscale x 2 x i1> %m, <vscale x 2 x i32> %y, <vscale x 2 x i32> %c, i32 %evl)
1525 ret <vscale x 2 x i32> %u
1528 declare <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
1529 declare <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
1530 declare <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
1531 declare <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32)
1533 define <vscale x 4 x i32> @vnmsac_vv_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1534 ; CHECK-LABEL: vnmsac_vv_nxv4i32:
1536 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu
1537 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
1538 ; CHECK-NEXT: vmv2r.v v8, v12
1540 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1541 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1542 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
1543 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1544 %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1545 ret <vscale x 4 x i32> %u
1548 define <vscale x 4 x i32> @vnmsac_vv_nxv4i32_unmasked(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1549 ; CHECK-LABEL: vnmsac_vv_nxv4i32_unmasked:
1551 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1552 ; CHECK-NEXT: vnmsac.vv v12, v8, v10
1553 ; CHECK-NEXT: vmv2r.v v8, v12
1555 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1556 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1557 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
1558 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1559 %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1560 ret <vscale x 4 x i32> %u
1563 define <vscale x 4 x i32> @vnmsac_vx_nxv4i32(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1564 ; CHECK-LABEL: vnmsac_vx_nxv4i32:
1566 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu
1567 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
1568 ; CHECK-NEXT: vmv2r.v v8, v10
1570 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1571 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1572 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1573 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1574 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
1575 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1576 %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1577 ret <vscale x 4 x i32> %u
1580 define <vscale x 4 x i32> @vnmsac_vx_nxv4i32_unmasked(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1581 ; CHECK-LABEL: vnmsac_vx_nxv4i32_unmasked:
1583 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
1584 ; CHECK-NEXT: vnmsac.vx v10, a0, v8
1585 ; CHECK-NEXT: vmv2r.v v8, v10
1587 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1588 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1589 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1590 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1591 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
1592 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1593 %u = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> %allones, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1594 ret <vscale x 4 x i32> %u
1597 define <vscale x 4 x i32> @vnmsac_vv_nxv4i32_ta(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1598 ; CHECK-LABEL: vnmsac_vv_nxv4i32_ta:
1600 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
1601 ; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
1602 ; CHECK-NEXT: vmv.v.v v8, v12
1604 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1605 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1606 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i1> %allones, i32 %evl)
1607 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1608 %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1609 ret <vscale x 4 x i32> %u
1612 define <vscale x 4 x i32> @vnmsac_vx_nxv4i32_ta(<vscale x 4 x i32> %a, i32 %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1613 ; CHECK-LABEL: vnmsac_vx_nxv4i32_ta:
1615 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
1616 ; CHECK-NEXT: vnmsac.vx v10, a0, v8, v0.t
1617 ; CHECK-NEXT: vmv.v.v v8, v10
1619 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
1620 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1621 %splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
1622 %allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
1623 %x = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %allones, i32 %evl)
1624 %y = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %x, <vscale x 4 x i1> %allones, i32 %evl)
1625 %u = call <vscale x 4 x i32> @llvm.vp.select.nxv4i32(<vscale x 4 x i1> %m, <vscale x 4 x i32> %y, <vscale x 4 x i32> %c, i32 %evl)
1626 ret <vscale x 4 x i32> %u
1629 declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
1630 declare <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
1631 declare <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
1632 declare <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1>, <vscale x 8 x i32>, <vscale x 8 x i32>, i32)
1634 define <vscale x 8 x i32> @vnmsac_vv_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1635 ; CHECK-LABEL: vnmsac_vv_nxv8i32:
1637 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu
1638 ; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
1639 ; CHECK-NEXT: vmv4r.v v8, v16
1641 %splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
1642 %allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
1643 %x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
1644 %y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vnmsac_vv_nxv8i32_unmasked(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT: vnmsac.vv v16, v8, v12
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vnmsac_vx_nxv8i32(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vnmsac_vx_nxv8i32_unmasked(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv8i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT: vnmsac.vx v12, a0, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.merge.nxv8i32(<vscale x 8 x i1> %allones, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vnmsac_vv_nxv8i32_ta(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv8i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}

define <vscale x 8 x i32> @vnmsac_vx_nxv8i32_ta(<vscale x 8 x i32> %a, i32 %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv8i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vnmsac.vx v12, a0, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %c, <vscale x 8 x i32> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i32> @llvm.vp.select.nxv8i32(<vscale x 8 x i1> %m, <vscale x 8 x i32> %y, <vscale x 8 x i32> %c, i32 %evl)
ret <vscale x 8 x i32> %u
}
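
; Note: in the llvm.vp.merge-based tests above the result is produced under a
; tail-undisturbed ("tu") vsetvli and copied back with a whole-register move
; (vmv4r.v), whereas the llvm.vp.select-based "_ta" variants use a
; tail-agnostic ("ta") policy and vmv.v.v. This comment is an observation on
; the generated checks, not an additional assertion.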

declare <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
declare <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32)
declare <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1>, <vscale x 16 x i32>, <vscale x 16 x i32>, i32)
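
; Note: at LMUL=8 (nxv16i32 here and nxv8i64 below) the third vector argument
; appears to be passed indirectly rather than in vector argument registers, so
; the checks first reload it from memory (vl8re32.v/vl8re64.v v24, (a0))
; before the vnmsac.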

define <vscale x 16 x i32> @vnmsac_vv_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vnmsac_vv_nxv16i32_unmasked(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT: vnmsac.vv v24, v8, v16
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vnmsac_vx_nxv16i32(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv16i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vnmsac_vx_nxv16i32_unmasked(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv16i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma
; CHECK-NEXT: vnmsac.vx v16, a0, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.merge.nxv16i32(<vscale x 16 x i1> %allones, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vnmsac_vv_nxv16i32_ta(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv16i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}

define <vscale x 16 x i32> @vnmsac_vx_nxv16i32_ta(<vscale x 16 x i32> %a, i32 %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vx_nxv16i32_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT: vnmsac.vx v16, a0, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
%vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
%splat = insertelement <vscale x 16 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 16 x i1> %splat, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
%x = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %allones, i32 %evl)
%y = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %c, <vscale x 16 x i32> %x, <vscale x 16 x i1> %allones, i32 %evl)
%u = call <vscale x 16 x i32> @llvm.vp.select.nxv16i32(<vscale x 16 x i1> %m, <vscale x 16 x i32> %y, <vscale x 16 x i32> %c, i32 %evl)
ret <vscale x 16 x i32> %u
}
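
; Note: for the i64 element types below the RV32 and RV64 checks diverge for
; the .vx forms: RV32 has no single GPR wide enough for the 64-bit scalar, so
; the checks splat it through the stack (sw/sw + vlse64.v strided load) and
; fall back to vnmsac.vv, while RV64 uses vnmsac.vx directly.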

declare <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32)
declare <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32)

define <vscale x 1 x i64> @vnmsac_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vnmsac_vv_nxv1i64_unmasked(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv1i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT: vnmsac.vv v10, v8, v9
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vnmsac_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT: vnmsac.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vnmsac.vx v9, a0, v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv1i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma
; RV32-NEXT: vnmsac.vv v9, v8, v10
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv1i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vnmsac.vx v9, a0, v8
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.merge.nxv1i64(<vscale x 1 x i1> %allones, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vnmsac_vv_nxv1i64_ta(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv1i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vnmsac.vv v10, v8, v9, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv1i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vnmsac.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv1i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vnmsac.vx v9, a0, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v9
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
%splat = insertelement <vscale x 1 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 1 x i1> %splat, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%x = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %allones, i32 %evl)
%y = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %c, <vscale x 1 x i64> %x, <vscale x 1 x i1> %allones, i32 %evl)
%u = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> %m, <vscale x 1 x i64> %y, <vscale x 1 x i64> %c, i32 %evl)
ret <vscale x 1 x i64> %u
}

declare <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)
declare <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32)

define <vscale x 2 x i64> @vnmsac_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vnmsac_vv_nxv2i64_unmasked(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT: vnmsac.vv v12, v8, v10
; CHECK-NEXT: vmv2r.v v8, v12
; CHECK-NEXT: ret
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vnmsac_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT: vnmsac.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, mu
; RV64-NEXT: vnmsac.vx v10, a0, v8, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv2i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma
; RV32-NEXT: vnmsac.vv v10, v8, v12
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv2i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, tu, ma
; RV64-NEXT: vnmsac.vx v10, a0, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.merge.nxv2i64(<vscale x 2 x i1> %allones, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vnmsac_vv_nxv2i64_ta(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv2i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vnmsac.vv v12, v8, v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv2i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vnmsac.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv2i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vnmsac.vx v10, a0, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 2 x i1> %splat, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%x = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %allones, i32 %evl)
%y = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %x, <vscale x 2 x i1> %allones, i32 %evl)
%u = call <vscale x 2 x i64> @llvm.vp.select.nxv2i64(<vscale x 2 x i1> %m, <vscale x 2 x i64> %y, <vscale x 2 x i64> %c, i32 %evl)
ret <vscale x 2 x i64> %u
}

declare <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
declare <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32)
declare <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1>, <vscale x 4 x i64>, <vscale x 4 x i64>, i32)

define <vscale x 4 x i64> @vnmsac_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vnmsac_vv_nxv4i64_unmasked(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv4i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT: vnmsac.vv v16, v8, v12
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vnmsac_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vnmsac.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, mu
; RV64-NEXT: vnmsac.vx v12, a0, v8, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv4i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma
; RV32-NEXT: vnmsac.vv v12, v8, v16
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv4i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, tu, ma
; RV64-NEXT: vnmsac.vx v12, a0, v8
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.merge.nxv4i64(<vscale x 4 x i1> %allones, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vnmsac_vv_nxv4i64_ta(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv4i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vnmsac.vv v16, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv4i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vnmsac.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv4i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vnmsac.vx v12, a0, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
%splat = insertelement <vscale x 4 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 4 x i1> %splat, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%x = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %allones, i32 %evl)
%y = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %c, <vscale x 4 x i64> %x, <vscale x 4 x i1> %allones, i32 %evl)
%u = call <vscale x 4 x i64> @llvm.vp.select.nxv4i64(<vscale x 4 x i1> %m, <vscale x 4 x i64> %y, <vscale x 4 x i64> %c, i32 %evl)
ret <vscale x 4 x i64> %u
}

declare <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
declare <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)
declare <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1>, <vscale x 8 x i64>, <vscale x 8 x i64>, i32)

define <vscale x 8 x i64> @vnmsac_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vnmsac_vv_nxv8i64_unmasked(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv8i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT: vnmsac.vv v24, v8, v16
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vnmsac_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu
; RV32-NEXT: vnmsac.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, mu
; RV64-NEXT: vnmsac.vx v16, a0, v8, v0.t
; RV64-NEXT: vmv8r.v v8, v16
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv8i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma
; RV32-NEXT: vnmsac.vv v16, v8, v24
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv8i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; RV64-NEXT: vnmsac.vx v16, a0, v8
; RV64-NEXT: vmv8r.v v8, v16
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.merge.nxv8i64(<vscale x 8 x i1> %allones, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vnmsac_vv_nxv8i64_ta(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vnmsac_vv_nxv8i64_ta:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT: vnmsac.vv v24, v8, v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}

define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vnmsac_vx_nxv8i64_ta:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vnmsac.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vnmsac_vx_nxv8i64_ta:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT: vnmsac.vx v16, a0, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
%vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
%splat = insertelement <vscale x 8 x i1> poison, i1 -1, i32 0
%allones = shufflevector <vscale x 8 x i1> %splat, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%x = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %allones, i32 %evl)
%y = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %c, <vscale x 8 x i64> %x, <vscale x 8 x i1> %allones, i32 %evl)
%u = call <vscale x 8 x i64> @llvm.vp.select.nxv8i64(<vscale x 8 x i1> %m, <vscale x 8 x i64> %y, <vscale x 8 x i64> %c, i32 %evl)
ret <vscale x 8 x i64> %u
}