; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; Test that the prepareSREMEqFold optimization doesn't crash on scalable
; vector types.
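; The expansion never computes the remainder: the input is multiplied by the
; modular inverse (vmacc.vx with -85), rotated right by one to undo the even
; factor of 6 (vsll.vi/vsrl.vi/vor.vv), and compared unsigned against a
; threshold (vmsleu.vx with 42).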
define <vscale x 4 x i1> @srem_eq_fold_nxv4i8(<vscale x 4 x i8> %va) {
; CHECK-LABEL: srem_eq_fold_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 42
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    li a1, -85
; CHECK-NEXT:    vmacc.vx v9, a1, v8
; CHECK-NEXT:    vsll.vi v8, v9, 7
; CHECK-NEXT:    vsrl.vi v9, v9, 1
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vmsleu.vx v0, v8, a0
; CHECK-NEXT:    ret
  %rem = srem <vscale x 4 x i8> %va, splat (i8 6)
  %cc = icmp eq <vscale x 4 x i8> %rem, zeroinitializer
  ret <vscale x 4 x i1> %cc
}

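; The vmulh_vv tests check that (trunc (lshr (mul (sext x), (sext y)), 32))
; over i32 elements is selected as a single vmulh.vv at e32 rather than a
; widened multiply at e64.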
define <vscale x 1 x i32> @vmulh_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %vd = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vg
}

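; With one operand splatted from a scalar, the same multiply-high pattern
; should use the vector-scalar form vmulh.vx, keeping the scalar in a0.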
define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

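; vmulh has no vector-immediate form, so a constant operand such as -7 is
; materialized into a GPR with li and still selects vmulh.vx.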
define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

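; Same with a positive constant: 16 is likewise materialized with li.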
define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}

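; The same vv/vx/constant patterns repeated for nxv2i32, which legalizes at
; LMUL=1 (m1) instead of mf2.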
define <vscale x 2 x i32> @vmulh_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %vd = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vg
}

define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv2i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

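; The same patterns for nxv4i32 at LMUL=2 (m2); the vv form now takes its
; second operand from register group v10.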
define <vscale x 4 x i32> @vmulh_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v10, v8
; CHECK-NEXT:    ret
  %vc = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %vd = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vg
}

define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv4i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

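; The same patterns for nxv8i32 at LMUL=4 (m4), with the second vv operand in
; v12.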
define <vscale x 8 x i32> @vmulh_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulh.vv v8, v12, v8
; CHECK-NEXT:    ret
  %vc = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %vd = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vg
}

define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv8i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 16
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = sext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}