; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X

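; These tests exercise ISel lowering of scalable-vector sdiv: vector-vector and
; vector-scalar cases should select vdiv.vv/vdiv.vx, while constant divisors are
; expected to be strength-reduced where the target allows it.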
define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i8> %va, %vb
  ret <vscale x 1 x i8> %vc
}

define <vscale x 1 x i8> @vdiv_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sdiv <vscale x 1 x i8> %va, %splat
  ret <vscale x 1 x i8> %vc
}

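; Division by the constant -7 should not need a real division here: the
; expected lowering is a vmulh.vx with a magic constant followed by
; subtract/shift/add fix-ups, as the vdiv_vi_* checks below show.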
define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v8, v9, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v9, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i8> %va, splat (i8 -7)
  ret <vscale x 1 x i8> %vc
}

; Test V/1 to see if we can optimize it away for scalable vectors.
define <vscale x 1 x i8> @vdiv_vi_nxv1i8_1(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i8> %va, splat (i8 1)
  ret <vscale x 1 x i8> %vc
}

; Test 0/V to see if we can optimize it away for scalable vectors.
define <vscale x 1 x i8> @vdiv_iv_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vdiv_iv_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i8> splat (i8 0), %va
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vdiv_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i8> %va, %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vdiv_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %vc = sdiv <vscale x 2 x i8> %va, %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v8, v9, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v9, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i8> %va, splat (i8 -7)
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vdiv_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i8> %va, %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vdiv_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %vc = sdiv <vscale x 4 x i8> %va, %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v8, v9, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v9, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i8> %va, splat (i8 -7)
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vdiv_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i8> %va, %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vdiv_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %vc = sdiv <vscale x 8 x i8> %va, %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmulh.vx v9, v8, a0
; CHECK-NEXT:    vsub.vv v8, v9, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v9, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i8> %va, splat (i8 -7)
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vdiv_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 16 x i8> %va, %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vdiv_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %vc = sdiv <vscale x 16 x i8> %va, %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmulh.vx v10, v8, a0
; CHECK-NEXT:    vsub.vv v8, v10, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v10, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 16 x i8> %va, splat (i8 -7)
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vdiv_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 32 x i8> %va, %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vdiv_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %vc = sdiv <vscale x 32 x i8> %va, %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmulh.vx v12, v8, a0
; CHECK-NEXT:    vsub.vv v8, v12, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v12, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 32 x i8> %va, splat (i8 -7)
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vdiv_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vdiv_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 64 x i8> %va, %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vdiv_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %vc = sdiv <vscale x 64 x i8> %va, %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vdiv_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 109
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmulh.vx v16, v8, a0
; CHECK-NEXT:    vsub.vv v8, v16, v8
; CHECK-NEXT:    vsra.vi v8, v8, 2
; CHECK-NEXT:    vsrl.vi v16, v8, 7
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 64 x i8> %va, splat (i8 -7)
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vdiv_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i16> %va, %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vdiv_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sdiv <vscale x 1 x i16> %va, %splat
  ret <vscale x 1 x i16> %vc
}

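; At e16 the magic constant for dividing by -7 no longer fits in a 12-bit
; immediate, so it is expected to be materialized with a lui/addi pair before
; the vmulh.vx.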
define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v9, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i16> %va, splat (i16 -7)
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vdiv_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i16> %va, %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vdiv_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %vc = sdiv <vscale x 2 x i16> %va, %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v9, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i16> %va, splat (i16 -7)
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vdiv_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i16> %va, %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vdiv_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %vc = sdiv <vscale x 4 x i16> %va, %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v9, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i16> %va, splat (i16 -7)
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vdiv_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i16> %va, %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vdiv_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %vc = sdiv <vscale x 8 x i16> %va, %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v10, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i16> %va, splat (i16 -7)
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vdiv_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 16 x i16> %va, %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vdiv_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %vc = sdiv <vscale x 16 x i16> %va, %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v12, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 16 x i16> %va, splat (i16 -7)
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vdiv_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vdiv_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 32 x i16> %va, %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vdiv_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %vc = sdiv <vscale x 32 x i16> %va, %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vdiv_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1048571
; CHECK-NEXT:    addi a0, a0, 1755
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT:    vmulh.vx v8, v8, a0
; CHECK-NEXT:    vsra.vi v8, v8, 1
; CHECK-NEXT:    vsrl.vi v16, v8, 15
; CHECK-NEXT:    vadd.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 32 x i16> %va, splat (i16 -7)
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vdiv_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i32> %va, %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sdiv <vscale x 1 x i32> %va, %splat
  ret <vscale x 1 x i32> %vc
}

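; The i32 constant-divisor tests use separate RV32/RV64 prefixes because the
; two targets emit the vsrl/vsra fix-up shifts in a different order.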
define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; RV32-LABEL: vdiv_vi_nxv1i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsub.vv v8, v9, v8
; RV32-NEXT:    vsrl.vi v9, v8, 31
; RV32-NEXT:    vsra.vi v8, v8, 2
; RV32-NEXT:    vadd.vv v8, v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vi_nxv1i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addi a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsub.vv v8, v9, v8
; RV64-NEXT:    vsra.vi v8, v8, 2
; RV64-NEXT:    vsrl.vi v9, v8, 31
; RV64-NEXT:    vadd.vv v8, v8, v9
; RV64-NEXT:    ret
  %vc = sdiv <vscale x 1 x i32> %va, splat (i32 -7)
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vdiv_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i32> %va, %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %vc = sdiv <vscale x 2 x i32> %va, %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; RV32-LABEL: vdiv_vi_nxv2i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmulh.vx v9, v8, a0
; RV32-NEXT:    vsub.vv v8, v9, v8
; RV32-NEXT:    vsrl.vi v9, v8, 31
; RV32-NEXT:    vsra.vi v8, v8, 2
; RV32-NEXT:    vadd.vv v8, v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vi_nxv2i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addi a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV64-NEXT:    vmulh.vx v9, v8, a0
; RV64-NEXT:    vsub.vv v8, v9, v8
; RV64-NEXT:    vsra.vi v8, v8, 2
; RV64-NEXT:    vsrl.vi v9, v8, 31
; RV64-NEXT:    vadd.vv v8, v8, v9
; RV64-NEXT:    ret
  %vc = sdiv <vscale x 2 x i32> %va, splat (i32 -7)
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vdiv_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i32> %va, %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %vc = sdiv <vscale x 4 x i32> %va, %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; RV32-LABEL: vdiv_vi_nxv4i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vmulh.vx v10, v8, a0
; RV32-NEXT:    vsub.vv v8, v10, v8
; RV32-NEXT:    vsrl.vi v10, v8, 31
; RV32-NEXT:    vsra.vi v8, v8, 2
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vi_nxv4i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addi a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV64-NEXT:    vmulh.vx v10, v8, a0
; RV64-NEXT:    vsub.vv v8, v10, v8
; RV64-NEXT:    vsra.vi v8, v8, 2
; RV64-NEXT:    vsrl.vi v10, v8, 31
; RV64-NEXT:    vadd.vv v8, v8, v10
; RV64-NEXT:    ret
  %vc = sdiv <vscale x 4 x i32> %va, splat (i32 -7)
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vdiv_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i32> %va, %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vc = sdiv <vscale x 8 x i32> %va, %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; RV32-LABEL: vdiv_vi_nxv8i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV32-NEXT:    vmulh.vx v12, v8, a0
; RV32-NEXT:    vsub.vv v8, v12, v8
; RV32-NEXT:    vsrl.vi v12, v8, 31
; RV32-NEXT:    vsra.vi v8, v8, 2
; RV32-NEXT:    vadd.vv v8, v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vi_nxv8i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addi a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; RV64-NEXT:    vmulh.vx v12, v8, a0
; RV64-NEXT:    vsub.vv v8, v12, v8
; RV64-NEXT:    vsra.vi v8, v8, 2
; RV64-NEXT:    vsrl.vi v12, v8, 31
; RV64-NEXT:    vadd.vv v8, v8, v12
; RV64-NEXT:    ret
  %vc = sdiv <vscale x 8 x i32> %va, splat (i32 -7)
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vdiv_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vdiv_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 16 x i32> %va, %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vdiv_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT:    vdiv.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %vc = sdiv <vscale x 16 x i32> %va, %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; RV32-LABEL: vdiv_vi_nxv16i32_0:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, 449390
; RV32-NEXT:    addi a0, a0, -1171
; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT:    vmulh.vx v16, v8, a0
; RV32-NEXT:    vsub.vv v8, v16, v8
; RV32-NEXT:    vsrl.vi v16, v8, 31
; RV32-NEXT:    vsra.vi v8, v8, 2
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vi_nxv16i32_0:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, 449390
; RV64-NEXT:    addi a0, a0, -1171
; RV64-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
; RV64-NEXT:    vmulh.vx v16, v8, a0
; RV64-NEXT:    vsub.vv v8, v16, v8
; RV64-NEXT:    vsra.vi v8, v8, 2
; RV64-NEXT:    vsrl.vi v16, v8, 31
; RV64-NEXT:    vadd.vv v8, v8, v16
; RV64-NEXT:    ret
  %vc = sdiv <vscale x 16 x i32> %va, splat (i32 -7)
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vdiv_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v9
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 1 x i64> %va, %vb
  ret <vscale x 1 x i64> %vc
}

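; For i64 the scalar-operand lowering differs by XLEN: RV32 has no 64-bit GPRs,
; so the splat of %b is expected to go through the stack and a zero-strided
; vlse64.v, while RV64 can use vdiv.vx directly.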
define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; RV32-LABEL: vdiv_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vdiv.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-NEXT:    vdiv.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %vc = sdiv <vscale x 1 x i64> %va, %splat
  ret <vscale x 1 x i64> %vc
}

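; The i64 constant-divisor tests split three ways: RV32-V builds the 64-bit
; magic constant on the stack and uses vmulh.vv, RV64-V loads it from the
; constant pool and uses vmulh.vx, and ZVE64X falls back to a real vdiv.vx
; since Zve64* does not provide vmulh at SEW=64.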
define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; RV32-V-LABEL: vdiv_vi_nxv1i64_0:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -16
; RV32-V-NEXT:    .cfi_def_cfa_offset 16
; RV32-V-NEXT:    lui a0, 748983
; RV32-V-NEXT:    lui a1, 898779
; RV32-V-NEXT:    addi a0, a0, -586
; RV32-V-NEXT:    addi a1, a1, 1755
; RV32-V-NEXT:    sw a1, 8(sp)
; RV32-V-NEXT:    sw a0, 12(sp)
; RV32-V-NEXT:    addi a0, sp, 8
; RV32-V-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV32-V-NEXT:    vlse64.v v9, (a0), zero
; RV32-V-NEXT:    li a0, 63
; RV32-V-NEXT:    vmulh.vv v8, v8, v9
; RV32-V-NEXT:    vsrl.vx v9, v8, a0
; RV32-V-NEXT:    vsra.vi v8, v8, 1
; RV32-V-NEXT:    vadd.vv v8, v8, v9
; RV32-V-NEXT:    addi sp, sp, 16
; RV32-V-NEXT:    .cfi_def_cfa_offset 0
; RV32-V-NEXT:    ret
;
; ZVE64X-LABEL: vdiv_vi_nxv1i64_0:
; ZVE64X:       # %bb.0:
; ZVE64X-NEXT:    li a0, -7
; ZVE64X-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
; ZVE64X-NEXT:    ret
;
; RV64-V-LABEL: vdiv_vi_nxv1i64_0:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    lui a0, %hi(.LCPI58_0)
; RV64-V-NEXT:    ld a0, %lo(.LCPI58_0)(a0)
; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
; RV64-V-NEXT:    vmulh.vx v8, v8, a0
; RV64-V-NEXT:    li a0, 63
; RV64-V-NEXT:    vsrl.vx v9, v8, a0
; RV64-V-NEXT:    vsra.vi v8, v8, 1
; RV64-V-NEXT:    vadd.vv v8, v8, v9
; RV64-V-NEXT:    ret
  %vc = sdiv <vscale x 1 x i64> %va, splat (i64 -7)
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vdiv_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v10
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 2 x i64> %va, %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; RV32-LABEL: vdiv_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vdiv.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-NEXT:    vdiv.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %vc = sdiv <vscale x 2 x i64> %va, %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; RV32-V-LABEL: vdiv_vi_nxv2i64_0:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -16
; RV32-V-NEXT:    .cfi_def_cfa_offset 16
; RV32-V-NEXT:    lui a0, 748983
; RV32-V-NEXT:    lui a1, 898779
; RV32-V-NEXT:    addi a0, a0, -586
; RV32-V-NEXT:    addi a1, a1, 1755
; RV32-V-NEXT:    sw a1, 8(sp)
; RV32-V-NEXT:    sw a0, 12(sp)
; RV32-V-NEXT:    addi a0, sp, 8
; RV32-V-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV32-V-NEXT:    vlse64.v v10, (a0), zero
; RV32-V-NEXT:    li a0, 63
; RV32-V-NEXT:    vmulh.vv v8, v8, v10
; RV32-V-NEXT:    vsrl.vx v10, v8, a0
; RV32-V-NEXT:    vsra.vi v8, v8, 1
; RV32-V-NEXT:    vadd.vv v8, v8, v10
; RV32-V-NEXT:    addi sp, sp, 16
; RV32-V-NEXT:    .cfi_def_cfa_offset 0
; RV32-V-NEXT:    ret
;
; ZVE64X-LABEL: vdiv_vi_nxv2i64_0:
; ZVE64X:       # %bb.0:
; ZVE64X-NEXT:    li a0, -7
; ZVE64X-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
; ZVE64X-NEXT:    ret
;
; RV64-V-LABEL: vdiv_vi_nxv2i64_0:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    lui a0, %hi(.LCPI61_0)
; RV64-V-NEXT:    ld a0, %lo(.LCPI61_0)(a0)
; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
; RV64-V-NEXT:    vmulh.vx v8, v8, a0
; RV64-V-NEXT:    li a0, 63
; RV64-V-NEXT:    vsrl.vx v10, v8, a0
; RV64-V-NEXT:    vsra.vi v8, v8, 1
; RV64-V-NEXT:    vadd.vv v8, v8, v10
; RV64-V-NEXT:    ret
  %vc = sdiv <vscale x 2 x i64> %va, splat (i64 -7)
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vdiv_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 4 x i64> %va, %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; RV32-LABEL: vdiv_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vdiv.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-NEXT:    vdiv.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %vc = sdiv <vscale x 4 x i64> %va, %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; RV32-V-LABEL: vdiv_vi_nxv4i64_0:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -16
; RV32-V-NEXT:    .cfi_def_cfa_offset 16
; RV32-V-NEXT:    lui a0, 748983
; RV32-V-NEXT:    lui a1, 898779
; RV32-V-NEXT:    addi a0, a0, -586
; RV32-V-NEXT:    addi a1, a1, 1755
; RV32-V-NEXT:    sw a1, 8(sp)
; RV32-V-NEXT:    sw a0, 12(sp)
; RV32-V-NEXT:    addi a0, sp, 8
; RV32-V-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV32-V-NEXT:    vlse64.v v12, (a0), zero
; RV32-V-NEXT:    li a0, 63
; RV32-V-NEXT:    vmulh.vv v8, v8, v12
; RV32-V-NEXT:    vsrl.vx v12, v8, a0
; RV32-V-NEXT:    vsra.vi v8, v8, 1
; RV32-V-NEXT:    vadd.vv v8, v8, v12
; RV32-V-NEXT:    addi sp, sp, 16
; RV32-V-NEXT:    .cfi_def_cfa_offset 0
; RV32-V-NEXT:    ret
;
; ZVE64X-LABEL: vdiv_vi_nxv4i64_0:
; ZVE64X:       # %bb.0:
; ZVE64X-NEXT:    li a0, -7
; ZVE64X-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
; ZVE64X-NEXT:    ret
;
; RV64-V-LABEL: vdiv_vi_nxv4i64_0:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    lui a0, %hi(.LCPI64_0)
; RV64-V-NEXT:    ld a0, %lo(.LCPI64_0)(a0)
; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
; RV64-V-NEXT:    vmulh.vx v8, v8, a0
; RV64-V-NEXT:    li a0, 63
; RV64-V-NEXT:    vsrl.vx v12, v8, a0
; RV64-V-NEXT:    vsra.vi v8, v8, 1
; RV64-V-NEXT:    vadd.vv v8, v8, v12
; RV64-V-NEXT:    ret
  %vc = sdiv <vscale x 4 x i64> %va, splat (i64 -7)
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vdiv_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vdiv_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vdiv.vv v8, v8, v16
; CHECK-NEXT:    ret
  %vc = sdiv <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: vdiv_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vdiv.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vdiv_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vdiv.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = sdiv <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; RV32-V-LABEL: vdiv_vi_nxv8i64_0:
; RV32-V:       # %bb.0:
; RV32-V-NEXT:    addi sp, sp, -16
; RV32-V-NEXT:    .cfi_def_cfa_offset 16
; RV32-V-NEXT:    lui a0, 748983
; RV32-V-NEXT:    lui a1, 898779
; RV32-V-NEXT:    addi a0, a0, -586
; RV32-V-NEXT:    addi a1, a1, 1755
; RV32-V-NEXT:    sw a1, 8(sp)
; RV32-V-NEXT:    sw a0, 12(sp)
; RV32-V-NEXT:    addi a0, sp, 8
; RV32-V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-V-NEXT:    vlse64.v v16, (a0), zero
; RV32-V-NEXT:    li a0, 63
; RV32-V-NEXT:    vmulh.vv v8, v8, v16
; RV32-V-NEXT:    vsrl.vx v16, v8, a0
; RV32-V-NEXT:    vsra.vi v8, v8, 1
; RV32-V-NEXT:    vadd.vv v8, v8, v16
; RV32-V-NEXT:    addi sp, sp, 16
; RV32-V-NEXT:    .cfi_def_cfa_offset 0
; RV32-V-NEXT:    ret
;
; ZVE64X-LABEL: vdiv_vi_nxv8i64_0:
; ZVE64X:       # %bb.0:
; ZVE64X-NEXT:    li a0, -7
; ZVE64X-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
; ZVE64X-NEXT:    ret
;
; RV64-V-LABEL: vdiv_vi_nxv8i64_0:
; RV64-V:       # %bb.0:
; RV64-V-NEXT:    lui a0, %hi(.LCPI67_0)
; RV64-V-NEXT:    ld a0, %lo(.LCPI67_0)(a0)
; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-V-NEXT:    vmulh.vx v8, v8, a0
; RV64-V-NEXT:    li a0, 63
; RV64-V-NEXT:    vsrl.vx v16, v8, a0
; RV64-V-NEXT:    vsra.vi v8, v8, 1
; RV64-V-NEXT:    vadd.vv v8, v8, v16
; RV64-V-NEXT:    ret
  %vc = sdiv <vscale x 8 x i64> %va, splat (i64 -7)
  ret <vscale x 8 x i64> %vc
}

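; The masked tests below select the divisor against a splat of 1; this is
; expected to lower to vmv.v.i/vmerge feeding an ordinary unmasked vdiv.vv
; rather than a masked division.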
define <vscale x 8 x i32> @vdiv_vv_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vdiv_vv_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 1
; CHECK-NEXT:    vmerge.vvm v12, v16, v12, v0
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %vb, <vscale x 8 x i32> splat (i32 1)
  %vc = sdiv <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vdiv_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vdiv_vx_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 1
; CHECK-NEXT:    vmerge.vxm v12, v12, a0, v0
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %head2 = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head2, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %splat, <vscale x 8 x i32> splat (i32 1)
  %vc = sdiv <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vdiv_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: vdiv_vi_mask_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 1
; CHECK-NEXT:    vmerge.vim v12, v12, 7, v0
; CHECK-NEXT:    vdiv.vv v8, v8, v12
; CHECK-NEXT:    ret
  %vs = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> splat (i32 7), <vscale x 8 x i32> splat (i32 1)
  %vc = sdiv <vscale x 8 x i32> %va, %vs
  ret <vscale x 8 x i32> %vc
}