1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
3 ; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
5 ; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X
7 define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
8 ; CHECK-LABEL: vrem_vv_nxv1i8:
10 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
11 ; CHECK-NEXT: vrem.vv v8, v8, v9
13 %vc = srem <vscale x 1 x i8> %va, %vb
14 ret <vscale x 1 x i8> %vc
17 define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
18 ; CHECK-LABEL: vrem_vx_nxv1i8:
20 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
21 ; CHECK-NEXT: vrem.vx v8, v8, a0
23 %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
24 %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
25 %vc = srem <vscale x 1 x i8> %va, %splat
26 ret <vscale x 1 x i8> %vc
29 define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
30 ; CHECK-LABEL: vrem_vi_nxv1i8_0:
32 ; CHECK-NEXT: li a0, 109
33 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
34 ; CHECK-NEXT: vmulh.vx v9, v8, a0
35 ; CHECK-NEXT: vsub.vv v9, v9, v8
36 ; CHECK-NEXT: vsra.vi v9, v9, 2
37 ; CHECK-NEXT: vsrl.vi v10, v9, 7
38 ; CHECK-NEXT: vadd.vv v9, v9, v10
39 ; CHECK-NEXT: li a0, -7
40 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
42 %head = insertelement <vscale x 1 x i8> poison, i8 -7, i32 0
43 %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
44 %vc = srem <vscale x 1 x i8> %va, %splat
45 ret <vscale x 1 x i8> %vc
48 define <vscale x 1 x i8> @vrem_vv_nxv1i8_sext_twice(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
49 ; CHECK-LABEL: vrem_vv_nxv1i8_sext_twice:
51 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
52 ; CHECK-NEXT: vrem.vv v8, v8, v9
54 %sext_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i16>
55 %sext_vb = sext <vscale x 1 x i8> %vb to <vscale x 1 x i16>
56 %vc_ext = srem <vscale x 1 x i16> %sext_va, %sext_vb
57 %vc = trunc <vscale x 1 x i16> %vc_ext to <vscale x 1 x i8>
58 ret <vscale x 1 x i8> %vc
61 define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
62 ; CHECK-LABEL: vrem_vv_nxv2i8:
64 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
65 ; CHECK-NEXT: vrem.vv v8, v8, v9
67 %vc = srem <vscale x 2 x i8> %va, %vb
68 ret <vscale x 2 x i8> %vc
71 define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
72 ; CHECK-LABEL: vrem_vx_nxv2i8:
74 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
75 ; CHECK-NEXT: vrem.vx v8, v8, a0
77 %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
78 %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
79 %vc = srem <vscale x 2 x i8> %va, %splat
80 ret <vscale x 2 x i8> %vc
83 define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
84 ; CHECK-LABEL: vrem_vi_nxv2i8_0:
86 ; CHECK-NEXT: li a0, 109
87 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
88 ; CHECK-NEXT: vmulh.vx v9, v8, a0
89 ; CHECK-NEXT: vsub.vv v9, v9, v8
90 ; CHECK-NEXT: vsra.vi v9, v9, 2
91 ; CHECK-NEXT: vsrl.vi v10, v9, 7
92 ; CHECK-NEXT: vadd.vv v9, v9, v10
93 ; CHECK-NEXT: li a0, -7
94 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
96 %head = insertelement <vscale x 2 x i8> poison, i8 -7, i32 0
97 %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
98 %vc = srem <vscale x 2 x i8> %va, %splat
99 ret <vscale x 2 x i8> %vc
102 define <vscale x 2 x i8> @vrem_vv_nxv2i8_sext_twice(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
103 ; CHECK-LABEL: vrem_vv_nxv2i8_sext_twice:
105 ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
106 ; CHECK-NEXT: vrem.vv v8, v8, v9
108 %sext_va = sext <vscale x 2 x i8> %va to <vscale x 2 x i16>
109 %sext_vb = sext <vscale x 2 x i8> %vb to <vscale x 2 x i16>
110 %vc_ext = srem <vscale x 2 x i16> %sext_va, %sext_vb
111 %vc = trunc <vscale x 2 x i16> %vc_ext to <vscale x 2 x i8>
112 ret <vscale x 2 x i8> %vc
115 define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
116 ; CHECK-LABEL: vrem_vv_nxv4i8:
118 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
119 ; CHECK-NEXT: vrem.vv v8, v8, v9
121 %vc = srem <vscale x 4 x i8> %va, %vb
122 ret <vscale x 4 x i8> %vc
125 define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
126 ; CHECK-LABEL: vrem_vx_nxv4i8:
128 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
129 ; CHECK-NEXT: vrem.vx v8, v8, a0
131 %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
132 %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
133 %vc = srem <vscale x 4 x i8> %va, %splat
134 ret <vscale x 4 x i8> %vc
137 define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
138 ; CHECK-LABEL: vrem_vi_nxv4i8_0:
140 ; CHECK-NEXT: li a0, 109
141 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
142 ; CHECK-NEXT: vmulh.vx v9, v8, a0
143 ; CHECK-NEXT: vsub.vv v9, v9, v8
144 ; CHECK-NEXT: vsra.vi v9, v9, 2
145 ; CHECK-NEXT: vsrl.vi v10, v9, 7
146 ; CHECK-NEXT: vadd.vv v9, v9, v10
147 ; CHECK-NEXT: li a0, -7
148 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
150 %head = insertelement <vscale x 4 x i8> poison, i8 -7, i32 0
151 %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
152 %vc = srem <vscale x 4 x i8> %va, %splat
153 ret <vscale x 4 x i8> %vc
156 define <vscale x 4 x i8> @vrem_vv_nxv4i8_sext_twice(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
157 ; CHECK-LABEL: vrem_vv_nxv4i8_sext_twice:
159 ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
160 ; CHECK-NEXT: vrem.vv v8, v8, v9
162 %sext_va = sext <vscale x 4 x i8> %va to <vscale x 4 x i16>
163 %sext_vb = sext <vscale x 4 x i8> %vb to <vscale x 4 x i16>
164 %vc_ext = srem <vscale x 4 x i16> %sext_va, %sext_vb
165 %vc = trunc <vscale x 4 x i16> %vc_ext to <vscale x 4 x i8>
166 ret <vscale x 4 x i8> %vc
169 define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
170 ; CHECK-LABEL: vrem_vv_nxv8i8:
172 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
173 ; CHECK-NEXT: vrem.vv v8, v8, v9
175 %vc = srem <vscale x 8 x i8> %va, %vb
176 ret <vscale x 8 x i8> %vc
179 define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
180 ; CHECK-LABEL: vrem_vx_nxv8i8:
182 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
183 ; CHECK-NEXT: vrem.vx v8, v8, a0
185 %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
186 %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
187 %vc = srem <vscale x 8 x i8> %va, %splat
188 ret <vscale x 8 x i8> %vc
191 define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
192 ; CHECK-LABEL: vrem_vi_nxv8i8_0:
194 ; CHECK-NEXT: li a0, 109
195 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
196 ; CHECK-NEXT: vmulh.vx v9, v8, a0
197 ; CHECK-NEXT: vsub.vv v9, v9, v8
198 ; CHECK-NEXT: vsra.vi v9, v9, 2
199 ; CHECK-NEXT: vsrl.vi v10, v9, 7
200 ; CHECK-NEXT: vadd.vv v9, v9, v10
201 ; CHECK-NEXT: li a0, -7
202 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
204 %head = insertelement <vscale x 8 x i8> poison, i8 -7, i32 0
205 %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
206 %vc = srem <vscale x 8 x i8> %va, %splat
207 ret <vscale x 8 x i8> %vc
210 define <vscale x 8 x i8> @vrem_vv_nxv8i8_sext_twice(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
211 ; CHECK-LABEL: vrem_vv_nxv8i8_sext_twice:
213 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
214 ; CHECK-NEXT: vrem.vv v8, v8, v9
216 %sext_va = sext <vscale x 8 x i8> %va to <vscale x 8 x i16>
217 %sext_vb = sext <vscale x 8 x i8> %vb to <vscale x 8 x i16>
218 %vc_ext = srem <vscale x 8 x i16> %sext_va, %sext_vb
219 %vc = trunc <vscale x 8 x i16> %vc_ext to <vscale x 8 x i8>
220 ret <vscale x 8 x i8> %vc
223 define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
224 ; CHECK-LABEL: vrem_vv_nxv16i8:
226 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
227 ; CHECK-NEXT: vrem.vv v8, v8, v10
229 %vc = srem <vscale x 16 x i8> %va, %vb
230 ret <vscale x 16 x i8> %vc
233 define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
234 ; CHECK-LABEL: vrem_vx_nxv16i8:
236 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
237 ; CHECK-NEXT: vrem.vx v8, v8, a0
239 %head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
240 %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
241 %vc = srem <vscale x 16 x i8> %va, %splat
242 ret <vscale x 16 x i8> %vc
245 define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
246 ; CHECK-LABEL: vrem_vi_nxv16i8_0:
248 ; CHECK-NEXT: li a0, 109
249 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
250 ; CHECK-NEXT: vmulh.vx v10, v8, a0
251 ; CHECK-NEXT: vsub.vv v10, v10, v8
252 ; CHECK-NEXT: vsra.vi v10, v10, 2
253 ; CHECK-NEXT: vsrl.vi v12, v10, 7
254 ; CHECK-NEXT: vadd.vv v10, v10, v12
255 ; CHECK-NEXT: li a0, -7
256 ; CHECK-NEXT: vnmsac.vx v8, a0, v10
258 %head = insertelement <vscale x 16 x i8> poison, i8 -7, i32 0
259 %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
260 %vc = srem <vscale x 16 x i8> %va, %splat
261 ret <vscale x 16 x i8> %vc
264 define <vscale x 16 x i8> @vrem_vv_nxv16i8_sext_twice(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
265 ; CHECK-LABEL: vrem_vv_nxv16i8_sext_twice:
267 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
268 ; CHECK-NEXT: vrem.vv v8, v8, v10
270 %sext_va = sext <vscale x 16 x i8> %va to <vscale x 16 x i16>
271 %sext_vb = sext <vscale x 16 x i8> %vb to <vscale x 16 x i16>
272 %vc_ext = srem <vscale x 16 x i16> %sext_va, %sext_vb
273 %vc = trunc <vscale x 16 x i16> %vc_ext to <vscale x 16 x i8>
274 ret <vscale x 16 x i8> %vc
277 define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
278 ; CHECK-LABEL: vrem_vv_nxv32i8:
280 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
281 ; CHECK-NEXT: vrem.vv v8, v8, v12
283 %vc = srem <vscale x 32 x i8> %va, %vb
284 ret <vscale x 32 x i8> %vc
287 define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
288 ; CHECK-LABEL: vrem_vx_nxv32i8:
290 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
291 ; CHECK-NEXT: vrem.vx v8, v8, a0
293 %head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
294 %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
295 %vc = srem <vscale x 32 x i8> %va, %splat
296 ret <vscale x 32 x i8> %vc
299 define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
300 ; CHECK-LABEL: vrem_vi_nxv32i8_0:
302 ; CHECK-NEXT: li a0, 109
303 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
304 ; CHECK-NEXT: vmulh.vx v12, v8, a0
305 ; CHECK-NEXT: vsub.vv v12, v12, v8
306 ; CHECK-NEXT: vsra.vi v12, v12, 2
307 ; CHECK-NEXT: vsrl.vi v16, v12, 7
308 ; CHECK-NEXT: vadd.vv v12, v12, v16
309 ; CHECK-NEXT: li a0, -7
310 ; CHECK-NEXT: vnmsac.vx v8, a0, v12
312 %head = insertelement <vscale x 32 x i8> poison, i8 -7, i32 0
313 %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
314 %vc = srem <vscale x 32 x i8> %va, %splat
315 ret <vscale x 32 x i8> %vc
318 define <vscale x 32 x i8> @vrem_vv_nxv32i8_sext_twice(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
319 ; CHECK-LABEL: vrem_vv_nxv32i8_sext_twice:
321 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
322 ; CHECK-NEXT: vrem.vv v8, v8, v12
324 %sext_va = sext <vscale x 32 x i8> %va to <vscale x 32 x i16>
325 %sext_vb = sext <vscale x 32 x i8> %vb to <vscale x 32 x i16>
326 %vc_ext = srem <vscale x 32 x i16> %sext_va, %sext_vb
327 %vc = trunc <vscale x 32 x i16> %vc_ext to <vscale x 32 x i8>
328 ret <vscale x 32 x i8> %vc
331 define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
332 ; CHECK-LABEL: vrem_vv_nxv64i8:
334 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
335 ; CHECK-NEXT: vrem.vv v8, v8, v16
337 %vc = srem <vscale x 64 x i8> %va, %vb
338 ret <vscale x 64 x i8> %vc
341 define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
342 ; CHECK-LABEL: vrem_vx_nxv64i8:
344 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
345 ; CHECK-NEXT: vrem.vx v8, v8, a0
347 %head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
348 %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
349 %vc = srem <vscale x 64 x i8> %va, %splat
350 ret <vscale x 64 x i8> %vc
353 define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
354 ; CHECK-LABEL: vrem_vi_nxv64i8_0:
356 ; CHECK-NEXT: li a0, 109
357 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
358 ; CHECK-NEXT: vmulh.vx v16, v8, a0
359 ; CHECK-NEXT: vsub.vv v16, v16, v8
360 ; CHECK-NEXT: vsra.vi v16, v16, 2
361 ; CHECK-NEXT: vsrl.vi v24, v16, 7
362 ; CHECK-NEXT: vadd.vv v16, v16, v24
363 ; CHECK-NEXT: li a0, -7
364 ; CHECK-NEXT: vnmsac.vx v8, a0, v16
366 %head = insertelement <vscale x 64 x i8> poison, i8 -7, i32 0
367 %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
368 %vc = srem <vscale x 64 x i8> %va, %splat
369 ret <vscale x 64 x i8> %vc
372 define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
373 ; CHECK-LABEL: vrem_vv_nxv1i16:
375 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
376 ; CHECK-NEXT: vrem.vv v8, v8, v9
378 %vc = srem <vscale x 1 x i16> %va, %vb
379 ret <vscale x 1 x i16> %vc
382 define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
383 ; CHECK-LABEL: vrem_vx_nxv1i16:
385 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
386 ; CHECK-NEXT: vrem.vx v8, v8, a0
388 %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
389 %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
390 %vc = srem <vscale x 1 x i16> %va, %splat
391 ret <vscale x 1 x i16> %vc
394 define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
395 ; CHECK-LABEL: vrem_vi_nxv1i16_0:
397 ; CHECK-NEXT: lui a0, 1048571
398 ; CHECK-NEXT: addi a0, a0, 1755
399 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
400 ; CHECK-NEXT: vmulh.vx v9, v8, a0
401 ; CHECK-NEXT: vsra.vi v9, v9, 1
402 ; CHECK-NEXT: vsrl.vi v10, v9, 15
403 ; CHECK-NEXT: vadd.vv v9, v9, v10
404 ; CHECK-NEXT: li a0, -7
405 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
407 %head = insertelement <vscale x 1 x i16> poison, i16 -7, i32 0
408 %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
409 %vc = srem <vscale x 1 x i16> %va, %splat
410 ret <vscale x 1 x i16> %vc
413 define <vscale x 1 x i16> @vrem_vv_nxv1i16_sext_twice(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
414 ; CHECK-LABEL: vrem_vv_nxv1i16_sext_twice:
416 ; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
417 ; CHECK-NEXT: vrem.vv v8, v8, v9
419 %sext_va = sext <vscale x 1 x i16> %va to <vscale x 1 x i32>
420 %sext_vb = sext <vscale x 1 x i16> %vb to <vscale x 1 x i32>
421 %vc_ext = srem <vscale x 1 x i32> %sext_va, %sext_vb
422 %vc = trunc <vscale x 1 x i32> %vc_ext to <vscale x 1 x i16>
423 ret <vscale x 1 x i16> %vc
426 define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
427 ; CHECK-LABEL: vrem_vv_nxv2i16:
429 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
430 ; CHECK-NEXT: vrem.vv v8, v8, v9
432 %vc = srem <vscale x 2 x i16> %va, %vb
433 ret <vscale x 2 x i16> %vc
436 define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
437 ; CHECK-LABEL: vrem_vx_nxv2i16:
439 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
440 ; CHECK-NEXT: vrem.vx v8, v8, a0
442 %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
443 %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
444 %vc = srem <vscale x 2 x i16> %va, %splat
445 ret <vscale x 2 x i16> %vc
448 define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
449 ; CHECK-LABEL: vrem_vi_nxv2i16_0:
451 ; CHECK-NEXT: lui a0, 1048571
452 ; CHECK-NEXT: addi a0, a0, 1755
453 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
454 ; CHECK-NEXT: vmulh.vx v9, v8, a0
455 ; CHECK-NEXT: vsra.vi v9, v9, 1
456 ; CHECK-NEXT: vsrl.vi v10, v9, 15
457 ; CHECK-NEXT: vadd.vv v9, v9, v10
458 ; CHECK-NEXT: li a0, -7
459 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
461 %head = insertelement <vscale x 2 x i16> poison, i16 -7, i32 0
462 %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
463 %vc = srem <vscale x 2 x i16> %va, %splat
464 ret <vscale x 2 x i16> %vc
467 define <vscale x 2 x i16> @vrem_vv_nxv2i16_sext_twice(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
468 ; CHECK-LABEL: vrem_vv_nxv2i16_sext_twice:
470 ; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
471 ; CHECK-NEXT: vrem.vv v8, v8, v9
473 %sext_va = sext <vscale x 2 x i16> %va to <vscale x 2 x i32>
474 %sext_vb = sext <vscale x 2 x i16> %vb to <vscale x 2 x i32>
475 %vc_ext = srem <vscale x 2 x i32> %sext_va, %sext_vb
476 %vc = trunc <vscale x 2 x i32> %vc_ext to <vscale x 2 x i16>
477 ret <vscale x 2 x i16> %vc
480 define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
481 ; CHECK-LABEL: vrem_vv_nxv4i16:
483 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
484 ; CHECK-NEXT: vrem.vv v8, v8, v9
486 %vc = srem <vscale x 4 x i16> %va, %vb
487 ret <vscale x 4 x i16> %vc
490 define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
491 ; CHECK-LABEL: vrem_vx_nxv4i16:
493 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
494 ; CHECK-NEXT: vrem.vx v8, v8, a0
496 %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
497 %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
498 %vc = srem <vscale x 4 x i16> %va, %splat
499 ret <vscale x 4 x i16> %vc
502 define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
503 ; CHECK-LABEL: vrem_vi_nxv4i16_0:
505 ; CHECK-NEXT: lui a0, 1048571
506 ; CHECK-NEXT: addi a0, a0, 1755
507 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
508 ; CHECK-NEXT: vmulh.vx v9, v8, a0
509 ; CHECK-NEXT: vsra.vi v9, v9, 1
510 ; CHECK-NEXT: vsrl.vi v10, v9, 15
511 ; CHECK-NEXT: vadd.vv v9, v9, v10
512 ; CHECK-NEXT: li a0, -7
513 ; CHECK-NEXT: vnmsac.vx v8, a0, v9
515 %head = insertelement <vscale x 4 x i16> poison, i16 -7, i32 0
516 %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
517 %vc = srem <vscale x 4 x i16> %va, %splat
518 ret <vscale x 4 x i16> %vc
521 define <vscale x 4 x i16> @vrem_vv_nxv4i16_sext_twice(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
522 ; CHECK-LABEL: vrem_vv_nxv4i16_sext_twice:
524 ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
525 ; CHECK-NEXT: vrem.vv v8, v8, v9
527 %sext_va = sext <vscale x 4 x i16> %va to <vscale x 4 x i32>
528 %sext_vb = sext <vscale x 4 x i16> %vb to <vscale x 4 x i32>
529 %vc_ext = srem <vscale x 4 x i32> %sext_va, %sext_vb
530 %vc = trunc <vscale x 4 x i32> %vc_ext to <vscale x 4 x i16>
531 ret <vscale x 4 x i16> %vc
534 define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
535 ; CHECK-LABEL: vrem_vv_nxv8i16:
537 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
538 ; CHECK-NEXT: vrem.vv v8, v8, v10
540 %vc = srem <vscale x 8 x i16> %va, %vb
541 ret <vscale x 8 x i16> %vc
544 define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
545 ; CHECK-LABEL: vrem_vx_nxv8i16:
547 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
548 ; CHECK-NEXT: vrem.vx v8, v8, a0
550 %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
551 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
552 %vc = srem <vscale x 8 x i16> %va, %splat
553 ret <vscale x 8 x i16> %vc
556 define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
557 ; CHECK-LABEL: vrem_vi_nxv8i16_0:
559 ; CHECK-NEXT: lui a0, 1048571
560 ; CHECK-NEXT: addi a0, a0, 1755
561 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
562 ; CHECK-NEXT: vmulh.vx v10, v8, a0
563 ; CHECK-NEXT: vsra.vi v10, v10, 1
564 ; CHECK-NEXT: vsrl.vi v12, v10, 15
565 ; CHECK-NEXT: vadd.vv v10, v10, v12
566 ; CHECK-NEXT: li a0, -7
567 ; CHECK-NEXT: vnmsac.vx v8, a0, v10
569 %head = insertelement <vscale x 8 x i16> poison, i16 -7, i32 0
570 %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
571 %vc = srem <vscale x 8 x i16> %va, %splat
572 ret <vscale x 8 x i16> %vc
575 define <vscale x 8 x i16> @vrem_vv_nxv8i16_sext_twice(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
576 ; CHECK-LABEL: vrem_vv_nxv8i16_sext_twice:
578 ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
579 ; CHECK-NEXT: vrem.vv v8, v8, v10
581 %sext_va = sext <vscale x 8 x i16> %va to <vscale x 8 x i32>
582 %sext_vb = sext <vscale x 8 x i16> %vb to <vscale x 8 x i32>
583 %vc_ext = srem <vscale x 8 x i32> %sext_va, %sext_vb
584 %vc = trunc <vscale x 8 x i32> %vc_ext to <vscale x 8 x i16>
585 ret <vscale x 8 x i16> %vc
588 define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
589 ; CHECK-LABEL: vrem_vv_nxv16i16:
591 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
592 ; CHECK-NEXT: vrem.vv v8, v8, v12
594 %vc = srem <vscale x 16 x i16> %va, %vb
595 ret <vscale x 16 x i16> %vc
598 define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
599 ; CHECK-LABEL: vrem_vx_nxv16i16:
601 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
602 ; CHECK-NEXT: vrem.vx v8, v8, a0
604 %head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
605 %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
606 %vc = srem <vscale x 16 x i16> %va, %splat
607 ret <vscale x 16 x i16> %vc
610 define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
611 ; CHECK-LABEL: vrem_vi_nxv16i16_0:
613 ; CHECK-NEXT: lui a0, 1048571
614 ; CHECK-NEXT: addi a0, a0, 1755
615 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
616 ; CHECK-NEXT: vmulh.vx v12, v8, a0
617 ; CHECK-NEXT: vsra.vi v12, v12, 1
618 ; CHECK-NEXT: vsrl.vi v16, v12, 15
619 ; CHECK-NEXT: vadd.vv v12, v12, v16
620 ; CHECK-NEXT: li a0, -7
621 ; CHECK-NEXT: vnmsac.vx v8, a0, v12
623 %head = insertelement <vscale x 16 x i16> poison, i16 -7, i32 0
624 %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
625 %vc = srem <vscale x 16 x i16> %va, %splat
626 ret <vscale x 16 x i16> %vc
629 define <vscale x 16 x i16> @vrem_vv_nxv16i16_sext_twice(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
630 ; CHECK-LABEL: vrem_vv_nxv16i16_sext_twice:
632 ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
633 ; CHECK-NEXT: vrem.vv v8, v8, v12
635 %sext_va = sext <vscale x 16 x i16> %va to <vscale x 16 x i32>
636 %sext_vb = sext <vscale x 16 x i16> %vb to <vscale x 16 x i32>
637 %vc_ext = srem <vscale x 16 x i32> %sext_va, %sext_vb
638 %vc = trunc <vscale x 16 x i32> %vc_ext to <vscale x 16 x i16>
639 ret <vscale x 16 x i16> %vc
642 define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
643 ; CHECK-LABEL: vrem_vv_nxv32i16:
645 ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma
646 ; CHECK-NEXT: vrem.vv v8, v8, v16
648 %vc = srem <vscale x 32 x i16> %va, %vb
649 ret <vscale x 32 x i16> %vc
652 define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
653 ; CHECK-LABEL: vrem_vx_nxv32i16:
655 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
656 ; CHECK-NEXT: vrem.vx v8, v8, a0
658 %head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
659 %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
660 %vc = srem <vscale x 32 x i16> %va, %splat
661 ret <vscale x 32 x i16> %vc
664 define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
665 ; CHECK-LABEL: vrem_vi_nxv32i16_0:
667 ; CHECK-NEXT: lui a0, 1048571
668 ; CHECK-NEXT: addi a0, a0, 1755
669 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
670 ; CHECK-NEXT: vmulh.vx v16, v8, a0
671 ; CHECK-NEXT: vsra.vi v16, v16, 1
672 ; CHECK-NEXT: vsrl.vi v24, v16, 15
673 ; CHECK-NEXT: vadd.vv v16, v16, v24
674 ; CHECK-NEXT: li a0, -7
675 ; CHECK-NEXT: vnmsac.vx v8, a0, v16
677 %head = insertelement <vscale x 32 x i16> poison, i16 -7, i32 0
678 %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
679 %vc = srem <vscale x 32 x i16> %va, %splat
680 ret <vscale x 32 x i16> %vc
683 define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
684 ; CHECK-LABEL: vrem_vv_nxv1i32:
686 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
687 ; CHECK-NEXT: vrem.vv v8, v8, v9
689 %vc = srem <vscale x 1 x i32> %va, %vb
690 ret <vscale x 1 x i32> %vc
693 define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
694 ; CHECK-LABEL: vrem_vx_nxv1i32:
696 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
697 ; CHECK-NEXT: vrem.vx v8, v8, a0
699 %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
700 %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
701 %vc = srem <vscale x 1 x i32> %va, %splat
702 ret <vscale x 1 x i32> %vc
705 define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
706 ; RV32-LABEL: vrem_vi_nxv1i32_0:
708 ; RV32-NEXT: lui a0, 449390
709 ; RV32-NEXT: addi a0, a0, -1171
710 ; RV32-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
711 ; RV32-NEXT: vmulh.vx v9, v8, a0
712 ; RV32-NEXT: vsub.vv v9, v9, v8
713 ; RV32-NEXT: vsrl.vi v10, v9, 31
714 ; RV32-NEXT: vsra.vi v9, v9, 2
715 ; RV32-NEXT: vadd.vv v9, v9, v10
716 ; RV32-NEXT: li a0, -7
717 ; RV32-NEXT: vnmsac.vx v8, a0, v9
720 ; RV64-LABEL: vrem_vi_nxv1i32_0:
722 ; RV64-NEXT: lui a0, 449390
723 ; RV64-NEXT: addi a0, a0, -1171
724 ; RV64-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
725 ; RV64-NEXT: vmulh.vx v9, v8, a0
726 ; RV64-NEXT: vsub.vv v9, v9, v8
727 ; RV64-NEXT: vsra.vi v9, v9, 2
728 ; RV64-NEXT: vsrl.vi v10, v9, 31
729 ; RV64-NEXT: vadd.vv v9, v9, v10
730 ; RV64-NEXT: li a0, -7
731 ; RV64-NEXT: vnmsac.vx v8, a0, v9
733 %head = insertelement <vscale x 1 x i32> poison, i32 -7, i32 0
734 %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
735 %vc = srem <vscale x 1 x i32> %va, %splat
736 ret <vscale x 1 x i32> %vc
739 define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
740 ; CHECK-LABEL: vrem_vv_nxv2i32:
742 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
743 ; CHECK-NEXT: vrem.vv v8, v8, v9
745 %vc = srem <vscale x 2 x i32> %va, %vb
746 ret <vscale x 2 x i32> %vc
749 define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
750 ; CHECK-LABEL: vrem_vx_nxv2i32:
752 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
753 ; CHECK-NEXT: vrem.vx v8, v8, a0
755 %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
756 %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
757 %vc = srem <vscale x 2 x i32> %va, %splat
758 ret <vscale x 2 x i32> %vc
761 define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
762 ; RV32-LABEL: vrem_vi_nxv2i32_0:
764 ; RV32-NEXT: lui a0, 449390
765 ; RV32-NEXT: addi a0, a0, -1171
766 ; RV32-NEXT: vsetvli a1, zero, e32, m1, ta, ma
767 ; RV32-NEXT: vmulh.vx v9, v8, a0
768 ; RV32-NEXT: vsub.vv v9, v9, v8
769 ; RV32-NEXT: vsrl.vi v10, v9, 31
770 ; RV32-NEXT: vsra.vi v9, v9, 2
771 ; RV32-NEXT: vadd.vv v9, v9, v10
772 ; RV32-NEXT: li a0, -7
773 ; RV32-NEXT: vnmsac.vx v8, a0, v9
776 ; RV64-LABEL: vrem_vi_nxv2i32_0:
778 ; RV64-NEXT: lui a0, 449390
779 ; RV64-NEXT: addi a0, a0, -1171
780 ; RV64-NEXT: vsetvli a1, zero, e32, m1, ta, ma
781 ; RV64-NEXT: vmulh.vx v9, v8, a0
782 ; RV64-NEXT: vsub.vv v9, v9, v8
783 ; RV64-NEXT: vsra.vi v9, v9, 2
784 ; RV64-NEXT: vsrl.vi v10, v9, 31
785 ; RV64-NEXT: vadd.vv v9, v9, v10
786 ; RV64-NEXT: li a0, -7
787 ; RV64-NEXT: vnmsac.vx v8, a0, v9
789 %head = insertelement <vscale x 2 x i32> poison, i32 -7, i32 0
790 %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
791 %vc = srem <vscale x 2 x i32> %va, %splat
792 ret <vscale x 2 x i32> %vc
795 define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
796 ; CHECK-LABEL: vrem_vv_nxv4i32:
798 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
799 ; CHECK-NEXT: vrem.vv v8, v8, v10
801 %vc = srem <vscale x 4 x i32> %va, %vb
802 ret <vscale x 4 x i32> %vc
805 define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
806 ; CHECK-LABEL: vrem_vx_nxv4i32:
808 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
809 ; CHECK-NEXT: vrem.vx v8, v8, a0
811 %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
812 %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
813 %vc = srem <vscale x 4 x i32> %va, %splat
814 ret <vscale x 4 x i32> %vc
817 define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
818 ; RV32-LABEL: vrem_vi_nxv4i32_0:
820 ; RV32-NEXT: lui a0, 449390
821 ; RV32-NEXT: addi a0, a0, -1171
822 ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma
823 ; RV32-NEXT: vmulh.vx v10, v8, a0
824 ; RV32-NEXT: vsub.vv v10, v10, v8
825 ; RV32-NEXT: vsrl.vi v12, v10, 31
826 ; RV32-NEXT: vsra.vi v10, v10, 2
827 ; RV32-NEXT: vadd.vv v10, v10, v12
828 ; RV32-NEXT: li a0, -7
829 ; RV32-NEXT: vnmsac.vx v8, a0, v10
832 ; RV64-LABEL: vrem_vi_nxv4i32_0:
834 ; RV64-NEXT: lui a0, 449390
835 ; RV64-NEXT: addi a0, a0, -1171
836 ; RV64-NEXT: vsetvli a1, zero, e32, m2, ta, ma
837 ; RV64-NEXT: vmulh.vx v10, v8, a0
838 ; RV64-NEXT: vsub.vv v10, v10, v8
839 ; RV64-NEXT: vsra.vi v10, v10, 2
840 ; RV64-NEXT: vsrl.vi v12, v10, 31
841 ; RV64-NEXT: vadd.vv v10, v10, v12
842 ; RV64-NEXT: li a0, -7
843 ; RV64-NEXT: vnmsac.vx v8, a0, v10
845 %head = insertelement <vscale x 4 x i32> poison, i32 -7, i32 0
846 %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
847 %vc = srem <vscale x 4 x i32> %va, %splat
848 ret <vscale x 4 x i32> %vc
851 define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
852 ; CHECK-LABEL: vrem_vv_nxv8i32:
854 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
855 ; CHECK-NEXT: vrem.vv v8, v8, v12
857 %vc = srem <vscale x 8 x i32> %va, %vb
858 ret <vscale x 8 x i32> %vc
861 define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
862 ; CHECK-LABEL: vrem_vx_nxv8i32:
864 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
865 ; CHECK-NEXT: vrem.vx v8, v8, a0
867 %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
868 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
869 %vc = srem <vscale x 8 x i32> %va, %splat
870 ret <vscale x 8 x i32> %vc
; srem of <vscale x 8 x i32> by splat constant -7: signed-magic lowering as in
; the nxv4i32 case, at m4. RV32 and RV64 differ only in vsra/vsrl ordering.
873 define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
874 ; RV32-LABEL: vrem_vi_nxv8i32_0:
876 ; RV32-NEXT: lui a0, 449390
877 ; RV32-NEXT: addi a0, a0, -1171
878 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
879 ; RV32-NEXT: vmulh.vx v12, v8, a0
880 ; RV32-NEXT: vsub.vv v12, v12, v8
881 ; RV32-NEXT: vsrl.vi v16, v12, 31
882 ; RV32-NEXT: vsra.vi v12, v12, 2
883 ; RV32-NEXT: vadd.vv v12, v12, v16
884 ; RV32-NEXT: li a0, -7
885 ; RV32-NEXT: vnmsac.vx v8, a0, v12
888 ; RV64-LABEL: vrem_vi_nxv8i32_0:
890 ; RV64-NEXT: lui a0, 449390
891 ; RV64-NEXT: addi a0, a0, -1171
892 ; RV64-NEXT: vsetvli a1, zero, e32, m4, ta, ma
893 ; RV64-NEXT: vmulh.vx v12, v8, a0
894 ; RV64-NEXT: vsub.vv v12, v12, v8
895 ; RV64-NEXT: vsra.vi v12, v12, 2
896 ; RV64-NEXT: vsrl.vi v16, v12, 31
897 ; RV64-NEXT: vadd.vv v12, v12, v16
898 ; RV64-NEXT: li a0, -7
899 ; RV64-NEXT: vnmsac.vx v8, a0, v12
901 %head = insertelement <vscale x 8 x i32> poison, i32 -7, i32 0
902 %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
903 %vc = srem <vscale x 8 x i32> %va, %splat
904 ret <vscale x 8 x i32> %vc
; Vector-vector srem at e32 (m8 register group): selects a single vrem.vv.
907 define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
908 ; CHECK-LABEL: vrem_vv_nxv16i32:
910 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
911 ; CHECK-NEXT: vrem.vv v8, v8, v16
913 %vc = srem <vscale x 16 x i32> %va, %vb
914 ret <vscale x 16 x i32> %vc
; Vector-scalar srem at m8: the i32 scalar splat folds into vrem.vx.
917 define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
918 ; CHECK-LABEL: vrem_vx_nxv16i32:
920 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
921 ; CHECK-NEXT: vrem.vx v8, v8, a0
923 %head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
924 %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
925 %vc = srem <vscale x 16 x i32> %va, %splat
926 ret <vscale x 16 x i32> %vc
; srem of <vscale x 16 x i32> by splat constant -7: signed-magic lowering at
; m8 (temporaries use v16/v24). RV32/RV64 differ only in vsra/vsrl ordering.
929 define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
930 ; RV32-LABEL: vrem_vi_nxv16i32_0:
932 ; RV32-NEXT: lui a0, 449390
933 ; RV32-NEXT: addi a0, a0, -1171
934 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
935 ; RV32-NEXT: vmulh.vx v16, v8, a0
936 ; RV32-NEXT: vsub.vv v16, v16, v8
937 ; RV32-NEXT: vsrl.vi v24, v16, 31
938 ; RV32-NEXT: vsra.vi v16, v16, 2
939 ; RV32-NEXT: vadd.vv v16, v16, v24
940 ; RV32-NEXT: li a0, -7
941 ; RV32-NEXT: vnmsac.vx v8, a0, v16
944 ; RV64-LABEL: vrem_vi_nxv16i32_0:
946 ; RV64-NEXT: lui a0, 449390
947 ; RV64-NEXT: addi a0, a0, -1171
948 ; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma
949 ; RV64-NEXT: vmulh.vx v16, v8, a0
950 ; RV64-NEXT: vsub.vv v16, v16, v8
951 ; RV64-NEXT: vsra.vi v16, v16, 2
952 ; RV64-NEXT: vsrl.vi v24, v16, 31
953 ; RV64-NEXT: vadd.vv v16, v16, v24
954 ; RV64-NEXT: li a0, -7
955 ; RV64-NEXT: vnmsac.vx v8, a0, v16
957 %head = insertelement <vscale x 16 x i32> poison, i32 -7, i32 0
958 %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
959 %vc = srem <vscale x 16 x i32> %va, %splat
960 ret <vscale x 16 x i32> %vc
; Vector-vector srem at e64 (m1): selects a single vrem.vv on all configs.
963 define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
964 ; CHECK-LABEL: vrem_vv_nxv1i64:
966 ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
967 ; CHECK-NEXT: vrem.vv v8, v8, v9
969 %vc = srem <vscale x 1 x i64> %va, %vb
970 ret <vscale x 1 x i64> %vc
; i64 scalar splat: RV32 passes the value as a (lo,hi) GPR pair, so it is
; stored to the stack and splat with a zero-stride vlse64 before vrem.vv;
; RV64 holds the full i64 in a0 and selects vrem.vx directly.
973 define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
974 ; RV32-LABEL: vrem_vx_nxv1i64:
976 ; RV32-NEXT: addi sp, sp, -16
977 ; RV32-NEXT: .cfi_def_cfa_offset 16
978 ; RV32-NEXT: sw a1, 12(sp)
979 ; RV32-NEXT: sw a0, 8(sp)
980 ; RV32-NEXT: addi a0, sp, 8
981 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
982 ; RV32-NEXT: vlse64.v v9, (a0), zero
983 ; RV32-NEXT: vrem.vv v8, v8, v9
984 ; RV32-NEXT: addi sp, sp, 16
987 ; RV64-LABEL: vrem_vx_nxv1i64:
989 ; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
990 ; RV64-NEXT: vrem.vx v8, v8, a0
992 %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
993 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
994 %vc = srem <vscale x 1 x i64> %va, %splat
995 ret <vscale x 1 x i64> %vc
; i64 srem by splat -7: three-way split. RV32-V builds the 64-bit magic
; constant in two halves on the stack and splats it (vlse64) for vmulh.vv;
; RV64-V loads the same constant from the constant pool for vmulh.vx; both
; then shift/round and vnmsac by -7. ZVE64X keeps a plain vrem.vx instead —
; presumably the magic-multiply path is not taken for Zve64x; confirm against
; the RISC-V lowering code before relying on this.
998 define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
999 ; RV32-V-LABEL: vrem_vi_nxv1i64_0:
1001 ; RV32-V-NEXT: addi sp, sp, -16
1002 ; RV32-V-NEXT: .cfi_def_cfa_offset 16
1003 ; RV32-V-NEXT: lui a0, 748983
1004 ; RV32-V-NEXT: addi a0, a0, -586
1005 ; RV32-V-NEXT: sw a0, 12(sp)
1006 ; RV32-V-NEXT: lui a0, 898779
1007 ; RV32-V-NEXT: addi a0, a0, 1755
1008 ; RV32-V-NEXT: sw a0, 8(sp)
1009 ; RV32-V-NEXT: addi a0, sp, 8
1010 ; RV32-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1011 ; RV32-V-NEXT: vlse64.v v9, (a0), zero
1012 ; RV32-V-NEXT: vmulh.vv v9, v8, v9
1013 ; RV32-V-NEXT: li a0, 63
1014 ; RV32-V-NEXT: vsrl.vx v10, v9, a0
1015 ; RV32-V-NEXT: vsra.vi v9, v9, 1
1016 ; RV32-V-NEXT: vadd.vv v9, v9, v10
1017 ; RV32-V-NEXT: li a0, -7
1018 ; RV32-V-NEXT: vnmsac.vx v8, a0, v9
1019 ; RV32-V-NEXT: addi sp, sp, 16
1022 ; ZVE64X-LABEL: vrem_vi_nxv1i64_0:
1024 ; ZVE64X-NEXT: li a0, -7
1025 ; ZVE64X-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1026 ; ZVE64X-NEXT: vrem.vx v8, v8, a0
1029 ; RV64-V-LABEL: vrem_vi_nxv1i64_0:
1031 ; RV64-V-NEXT: lui a0, %hi(.LCPI67_0)
1032 ; RV64-V-NEXT: ld a0, %lo(.LCPI67_0)(a0)
1033 ; RV64-V-NEXT: vsetvli a1, zero, e64, m1, ta, ma
1034 ; RV64-V-NEXT: vmulh.vx v9, v8, a0
1035 ; RV64-V-NEXT: li a0, 63
1036 ; RV64-V-NEXT: vsrl.vx v10, v9, a0
1037 ; RV64-V-NEXT: vsra.vi v9, v9, 1
1038 ; RV64-V-NEXT: vadd.vv v9, v9, v10
1039 ; RV64-V-NEXT: li a0, -7
1040 ; RV64-V-NEXT: vnmsac.vx v8, a0, v9
1042 %head = insertelement <vscale x 1 x i64> poison, i64 -7, i32 0
1043 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
1044 %vc = srem <vscale x 1 x i64> %va, %splat
1045 ret <vscale x 1 x i64> %vc
; Vector-vector srem at e64 (m2): selects a single vrem.vv.
1048 define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
1049 ; CHECK-LABEL: vrem_vv_nxv2i64:
1051 ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
1052 ; CHECK-NEXT: vrem.vv v8, v8, v10
1054 %vc = srem <vscale x 2 x i64> %va, %vb
1055 ret <vscale x 2 x i64> %vc
; i64 scalar splat at m2: RV32 stores the (lo,hi) pair to the stack and
; splats with zero-stride vlse64, then vrem.vv; RV64 selects vrem.vx.
1058 define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
1059 ; RV32-LABEL: vrem_vx_nxv2i64:
1061 ; RV32-NEXT: addi sp, sp, -16
1062 ; RV32-NEXT: .cfi_def_cfa_offset 16
1063 ; RV32-NEXT: sw a1, 12(sp)
1064 ; RV32-NEXT: sw a0, 8(sp)
1065 ; RV32-NEXT: addi a0, sp, 8
1066 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1067 ; RV32-NEXT: vlse64.v v10, (a0), zero
1068 ; RV32-NEXT: vrem.vv v8, v8, v10
1069 ; RV32-NEXT: addi sp, sp, 16
1072 ; RV64-LABEL: vrem_vx_nxv2i64:
1074 ; RV64-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1075 ; RV64-NEXT: vrem.vx v8, v8, a0
1077 %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1078 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1079 %vc = srem <vscale x 2 x i64> %va, %splat
1080 ret <vscale x 2 x i64> %vc
; i64 srem by splat -7 at m2: same three-way split as the nxv1i64 case —
; RV32-V stack-splats the 64-bit magic constant for vmulh.vv, RV64-V loads it
; from the constant pool for vmulh.vx, and ZVE64X keeps a plain vrem.vx.
1083 define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
1084 ; RV32-V-LABEL: vrem_vi_nxv2i64_0:
1086 ; RV32-V-NEXT: addi sp, sp, -16
1087 ; RV32-V-NEXT: .cfi_def_cfa_offset 16
1088 ; RV32-V-NEXT: lui a0, 748983
1089 ; RV32-V-NEXT: addi a0, a0, -586
1090 ; RV32-V-NEXT: sw a0, 12(sp)
1091 ; RV32-V-NEXT: lui a0, 898779
1092 ; RV32-V-NEXT: addi a0, a0, 1755
1093 ; RV32-V-NEXT: sw a0, 8(sp)
1094 ; RV32-V-NEXT: addi a0, sp, 8
1095 ; RV32-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1096 ; RV32-V-NEXT: vlse64.v v10, (a0), zero
1097 ; RV32-V-NEXT: vmulh.vv v10, v8, v10
1098 ; RV32-V-NEXT: li a0, 63
1099 ; RV32-V-NEXT: vsrl.vx v12, v10, a0
1100 ; RV32-V-NEXT: vsra.vi v10, v10, 1
1101 ; RV32-V-NEXT: vadd.vv v10, v10, v12
1102 ; RV32-V-NEXT: li a0, -7
1103 ; RV32-V-NEXT: vnmsac.vx v8, a0, v10
1104 ; RV32-V-NEXT: addi sp, sp, 16
1107 ; ZVE64X-LABEL: vrem_vi_nxv2i64_0:
1109 ; ZVE64X-NEXT: li a0, -7
1110 ; ZVE64X-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1111 ; ZVE64X-NEXT: vrem.vx v8, v8, a0
1114 ; RV64-V-LABEL: vrem_vi_nxv2i64_0:
1116 ; RV64-V-NEXT: lui a0, %hi(.LCPI70_0)
1117 ; RV64-V-NEXT: ld a0, %lo(.LCPI70_0)(a0)
1118 ; RV64-V-NEXT: vsetvli a1, zero, e64, m2, ta, ma
1119 ; RV64-V-NEXT: vmulh.vx v10, v8, a0
1120 ; RV64-V-NEXT: li a0, 63
1121 ; RV64-V-NEXT: vsrl.vx v12, v10, a0
1122 ; RV64-V-NEXT: vsra.vi v10, v10, 1
1123 ; RV64-V-NEXT: vadd.vv v10, v10, v12
1124 ; RV64-V-NEXT: li a0, -7
1125 ; RV64-V-NEXT: vnmsac.vx v8, a0, v10
1127 %head = insertelement <vscale x 2 x i64> poison, i64 -7, i32 0
1128 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1129 %vc = srem <vscale x 2 x i64> %va, %splat
1130 ret <vscale x 2 x i64> %vc
; Vector-vector srem at e64 (m4): selects a single vrem.vv.
1133 define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
1134 ; CHECK-LABEL: vrem_vv_nxv4i64:
1136 ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
1137 ; CHECK-NEXT: vrem.vv v8, v8, v12
1139 %vc = srem <vscale x 4 x i64> %va, %vb
1140 ret <vscale x 4 x i64> %vc
; i64 scalar splat at m4: RV32 stores the (lo,hi) pair to the stack and
; splats with zero-stride vlse64, then vrem.vv; RV64 selects vrem.vx.
1143 define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
1144 ; RV32-LABEL: vrem_vx_nxv4i64:
1146 ; RV32-NEXT: addi sp, sp, -16
1147 ; RV32-NEXT: .cfi_def_cfa_offset 16
1148 ; RV32-NEXT: sw a1, 12(sp)
1149 ; RV32-NEXT: sw a0, 8(sp)
1150 ; RV32-NEXT: addi a0, sp, 8
1151 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1152 ; RV32-NEXT: vlse64.v v12, (a0), zero
1153 ; RV32-NEXT: vrem.vv v8, v8, v12
1154 ; RV32-NEXT: addi sp, sp, 16
1157 ; RV64-LABEL: vrem_vx_nxv4i64:
1159 ; RV64-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1160 ; RV64-NEXT: vrem.vx v8, v8, a0
1162 %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1163 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1164 %vc = srem <vscale x 4 x i64> %va, %splat
1165 ret <vscale x 4 x i64> %vc
; i64 srem by splat -7 at m4: same three-way split as the nxv1i64 case —
; RV32-V stack-splats the 64-bit magic constant for vmulh.vv, RV64-V loads it
; from the constant pool for vmulh.vx, and ZVE64X keeps a plain vrem.vx.
1168 define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
1169 ; RV32-V-LABEL: vrem_vi_nxv4i64_0:
1171 ; RV32-V-NEXT: addi sp, sp, -16
1172 ; RV32-V-NEXT: .cfi_def_cfa_offset 16
1173 ; RV32-V-NEXT: lui a0, 748983
1174 ; RV32-V-NEXT: addi a0, a0, -586
1175 ; RV32-V-NEXT: sw a0, 12(sp)
1176 ; RV32-V-NEXT: lui a0, 898779
1177 ; RV32-V-NEXT: addi a0, a0, 1755
1178 ; RV32-V-NEXT: sw a0, 8(sp)
1179 ; RV32-V-NEXT: addi a0, sp, 8
1180 ; RV32-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1181 ; RV32-V-NEXT: vlse64.v v12, (a0), zero
1182 ; RV32-V-NEXT: vmulh.vv v12, v8, v12
1183 ; RV32-V-NEXT: li a0, 63
1184 ; RV32-V-NEXT: vsrl.vx v16, v12, a0
1185 ; RV32-V-NEXT: vsra.vi v12, v12, 1
1186 ; RV32-V-NEXT: vadd.vv v12, v12, v16
1187 ; RV32-V-NEXT: li a0, -7
1188 ; RV32-V-NEXT: vnmsac.vx v8, a0, v12
1189 ; RV32-V-NEXT: addi sp, sp, 16
1192 ; ZVE64X-LABEL: vrem_vi_nxv4i64_0:
1194 ; ZVE64X-NEXT: li a0, -7
1195 ; ZVE64X-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1196 ; ZVE64X-NEXT: vrem.vx v8, v8, a0
1199 ; RV64-V-LABEL: vrem_vi_nxv4i64_0:
1201 ; RV64-V-NEXT: lui a0, %hi(.LCPI73_0)
1202 ; RV64-V-NEXT: ld a0, %lo(.LCPI73_0)(a0)
1203 ; RV64-V-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1204 ; RV64-V-NEXT: vmulh.vx v12, v8, a0
1205 ; RV64-V-NEXT: li a0, 63
1206 ; RV64-V-NEXT: vsrl.vx v16, v12, a0
1207 ; RV64-V-NEXT: vsra.vi v12, v12, 1
1208 ; RV64-V-NEXT: vadd.vv v12, v12, v16
1209 ; RV64-V-NEXT: li a0, -7
1210 ; RV64-V-NEXT: vnmsac.vx v8, a0, v12
1212 %head = insertelement <vscale x 4 x i64> poison, i64 -7, i32 0
1213 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1214 %vc = srem <vscale x 4 x i64> %va, %splat
1215 ret <vscale x 4 x i64> %vc
; Vector-vector srem at e64 (m8): selects a single vrem.vv.
1218 define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
1219 ; CHECK-LABEL: vrem_vv_nxv8i64:
1221 ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
1222 ; CHECK-NEXT: vrem.vv v8, v8, v16
1224 %vc = srem <vscale x 8 x i64> %va, %vb
1225 ret <vscale x 8 x i64> %vc
; i64 scalar splat at m8: RV32 stores the (lo,hi) pair to the stack and
; splats with zero-stride vlse64, then vrem.vv; RV64 selects vrem.vx.
1228 define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
1229 ; RV32-LABEL: vrem_vx_nxv8i64:
1231 ; RV32-NEXT: addi sp, sp, -16
1232 ; RV32-NEXT: .cfi_def_cfa_offset 16
1233 ; RV32-NEXT: sw a1, 12(sp)
1234 ; RV32-NEXT: sw a0, 8(sp)
1235 ; RV32-NEXT: addi a0, sp, 8
1236 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1237 ; RV32-NEXT: vlse64.v v16, (a0), zero
1238 ; RV32-NEXT: vrem.vv v8, v8, v16
1239 ; RV32-NEXT: addi sp, sp, 16
1242 ; RV64-LABEL: vrem_vx_nxv8i64:
1244 ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1245 ; RV64-NEXT: vrem.vx v8, v8, a0
1247 %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1248 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1249 %vc = srem <vscale x 8 x i64> %va, %splat
1250 ret <vscale x 8 x i64> %vc
; i64 srem by splat -7 at m8: same three-way split as the nxv1i64 case —
; RV32-V stack-splats the 64-bit magic constant for vmulh.vv, RV64-V loads it
; from the constant pool for vmulh.vx, and ZVE64X keeps a plain vrem.vx.
1253 define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
1254 ; RV32-V-LABEL: vrem_vi_nxv8i64_0:
1256 ; RV32-V-NEXT: addi sp, sp, -16
1257 ; RV32-V-NEXT: .cfi_def_cfa_offset 16
1258 ; RV32-V-NEXT: lui a0, 748983
1259 ; RV32-V-NEXT: addi a0, a0, -586
1260 ; RV32-V-NEXT: sw a0, 12(sp)
1261 ; RV32-V-NEXT: lui a0, 898779
1262 ; RV32-V-NEXT: addi a0, a0, 1755
1263 ; RV32-V-NEXT: sw a0, 8(sp)
1264 ; RV32-V-NEXT: addi a0, sp, 8
1265 ; RV32-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1266 ; RV32-V-NEXT: vlse64.v v16, (a0), zero
1267 ; RV32-V-NEXT: vmulh.vv v16, v8, v16
1268 ; RV32-V-NEXT: li a0, 63
1269 ; RV32-V-NEXT: vsrl.vx v24, v16, a0
1270 ; RV32-V-NEXT: vsra.vi v16, v16, 1
1271 ; RV32-V-NEXT: vadd.vv v16, v16, v24
1272 ; RV32-V-NEXT: li a0, -7
1273 ; RV32-V-NEXT: vnmsac.vx v8, a0, v16
1274 ; RV32-V-NEXT: addi sp, sp, 16
1277 ; ZVE64X-LABEL: vrem_vi_nxv8i64_0:
1279 ; ZVE64X-NEXT: li a0, -7
1280 ; ZVE64X-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1281 ; ZVE64X-NEXT: vrem.vx v8, v8, a0
1284 ; RV64-V-LABEL: vrem_vi_nxv8i64_0:
1286 ; RV64-V-NEXT: lui a0, %hi(.LCPI76_0)
1287 ; RV64-V-NEXT: ld a0, %lo(.LCPI76_0)(a0)
1288 ; RV64-V-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1289 ; RV64-V-NEXT: vmulh.vx v16, v8, a0
1290 ; RV64-V-NEXT: li a0, 63
1291 ; RV64-V-NEXT: vsrl.vx v24, v16, a0
1292 ; RV64-V-NEXT: vsra.vi v16, v16, 1
1293 ; RV64-V-NEXT: vadd.vv v16, v16, v24
1294 ; RV64-V-NEXT: li a0, -7
1295 ; RV64-V-NEXT: vnmsac.vx v8, a0, v16
1297 %head = insertelement <vscale x 8 x i64> poison, i64 -7, i32 0
1298 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1299 %vc = srem <vscale x 8 x i64> %va, %splat
1300 ret <vscale x 8 x i64> %vc