; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
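
; Check that the unsigned multiply-high pattern
;   trunc (lshr (mul (zext x to i64), (zext y to i64)), 32)
; on scalable i32 vectors is selected to vmulhu.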

define <vscale x 1 x i32> @vmulhu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
  %vd = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %ve = mul <vscale x 1 x i64> %vc, %vd
  %vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vg
}
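
; A splatted scalar operand should select the scalar-operand .vx form.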
define <vscale x 1 x i32> @vmulhu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vb = zext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}
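
; vmulhu has no vector-immediate form, so a splat constant multiplier is
; materialized into a scalar register with li and used via vmulhu.vx.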
define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}
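
; For a power-of-two multiplier the high half is a plain right shift:
; RV32 folds this to vsrl.vi, while RV64 currently keeps vmulhu.vx.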
define <vscale x 1 x i32> @vmulhu_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv1i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv1i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
  %vd = mul <vscale x 1 x i64> %vb, %vc
  %ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %vf
}
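
; The same patterns are repeated below at larger types: nxv2i32 (m1),
; nxv4i32 (m2), and nxv8i32 (m4).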

define <vscale x 2 x i32> @vmulhu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v9, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
  %vd = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %ve = mul <vscale x 2 x i64> %vc, %vd
  %vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vg
}

define <vscale x 2 x i32> @vmulhu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vb = zext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 2 x i32> @vmulhu_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv2i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv2i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
  %vd = mul <vscale x 2 x i64> %vb, %vc
  %ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %vf
}

define <vscale x 4 x i32> @vmulhu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v10, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
  %vd = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %ve = mul <vscale x 4 x i64> %vc, %vd
  %vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vg
}

define <vscale x 4 x i32> @vmulhu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vb = zext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 4 x i32> @vmulhu_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv4i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv4i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
  %vd = mul <vscale x 4 x i64> %vb, %vc
  %ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %vf
}

define <vscale x 8 x i32> @vmulhu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmulhu_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vv v8, v12, v8
; CHECK-NEXT:    ret
  %vc = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
  %vd = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %ve = mul <vscale x 8 x i64> %vc, %vd
  %vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)
  %vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vg
}

define <vscale x 8 x i32> @vmulhu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulhu_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head1 = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
  %splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vb = zext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulhu_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -7
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmulhu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %vb = zext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}

define <vscale x 8 x i32> @vmulhu_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; RV32-LABEL: vmulhu_vi_nxv8i32_1:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT:    vsrl.vi v8, v8, 28
; RV32-NEXT:    ret
;
; RV64-LABEL: vmulhu_vi_nxv8i32_1:
; RV64:       # %bb.0:
; RV64-NEXT:    li a0, 16
; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV64-NEXT:    vmulhu.vx v8, v8, a0
; RV64-NEXT:    ret
  %vb = zext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
  %vd = mul <vscale x 8 x i64> %vb, %vc
  %ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)
  %vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %vf
}