1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
; <vscale x 8 x i7> has an illegal (non-byte) element type: the operation is
; carried out in e8, so both operands are sign-extended in-register from i7
; to i8 with a vsll.vi/vsra.vi (shift-by-1) pair before the vrem.vv.
7 declare <vscale x 8 x i7> @llvm.vp.srem.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)
9 define <vscale x 8 x i7> @vrem_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
10 ; CHECK-LABEL: vrem_vx_nxv8i7:
12 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
13 ; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
14 ; CHECK-NEXT: vmv.v.x v9, a0
15 ; CHECK-NEXT: vsra.vi v8, v8, 1, v0.t
16 ; CHECK-NEXT: vsll.vi v9, v9, 1, v0.t
17 ; CHECK-NEXT: vsra.vi v9, v9, 1, v0.t
18 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
20 %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
21 %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
22 %v = call <vscale x 8 x i7> @llvm.vp.srem.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
23 ret <vscale x 8 x i7> %v
; vp.srem on <vscale x 1 x i8> (e8/mf8): vector-vector and vector-scalar
; (splat) forms, each masked and unmasked (all-true mask).
26 declare <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
28 define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
29 ; CHECK-LABEL: vrem_vv_nxv1i8:
31 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
32 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
34 %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
35 ret <vscale x 1 x i8> %v
38 define <vscale x 1 x i8> @vrem_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
39 ; CHECK-LABEL: vrem_vv_nxv1i8_unmasked:
41 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
42 ; CHECK-NEXT: vrem.vv v8, v8, v9
44 %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
45 ret <vscale x 1 x i8> %v
48 define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
49 ; CHECK-LABEL: vrem_vx_nxv1i8:
51 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
52 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
54 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
55 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
56 %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
57 ret <vscale x 1 x i8> %v
60 define <vscale x 1 x i8> @vrem_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
61 ; CHECK-LABEL: vrem_vx_nxv1i8_unmasked:
63 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
64 ; CHECK-NEXT: vrem.vx v8, v8, a0
66 %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
67 %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
68 %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
69 ret <vscale x 1 x i8> %v
; vp.srem on <vscale x 2 x i8> (e8/mf4): vv and vx forms, masked and unmasked.
72 declare <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
74 define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
75 ; CHECK-LABEL: vrem_vv_nxv2i8:
77 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
78 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
80 %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
81 ret <vscale x 2 x i8> %v
84 define <vscale x 2 x i8> @vrem_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
85 ; CHECK-LABEL: vrem_vv_nxv2i8_unmasked:
87 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
88 ; CHECK-NEXT: vrem.vv v8, v8, v9
90 %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
91 ret <vscale x 2 x i8> %v
94 define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
95 ; CHECK-LABEL: vrem_vx_nxv2i8:
97 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
98 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
100 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
101 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
102 %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
103 ret <vscale x 2 x i8> %v
106 define <vscale x 2 x i8> @vrem_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
107 ; CHECK-LABEL: vrem_vx_nxv2i8_unmasked:
109 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
110 ; CHECK-NEXT: vrem.vx v8, v8, a0
112 %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
113 %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
114 %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
115 ret <vscale x 2 x i8> %v
; vp.srem on the non-power-of-two type <vscale x 3 x i8>: lowered with
; e8/mf2 (i.e. the widened <vscale x 4 x i8> register size). Only the masked
; vv form is tested for this type.
118 declare <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
120 define <vscale x 3 x i8> @vrem_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
121 ; CHECK-LABEL: vrem_vv_nxv3i8:
123 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
124 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
126 %v = call <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
127 ret <vscale x 3 x i8> %v
; vp.srem on <vscale x 4 x i8> (e8/mf2): vv and vx forms, masked and unmasked.
130 declare <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
132 define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
133 ; CHECK-LABEL: vrem_vv_nxv4i8:
135 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
136 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
138 %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
139 ret <vscale x 4 x i8> %v
142 define <vscale x 4 x i8> @vrem_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
143 ; CHECK-LABEL: vrem_vv_nxv4i8_unmasked:
145 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
146 ; CHECK-NEXT: vrem.vv v8, v8, v9
148 %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
149 ret <vscale x 4 x i8> %v
152 define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
153 ; CHECK-LABEL: vrem_vx_nxv4i8:
155 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
156 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
158 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
159 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
160 %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
161 ret <vscale x 4 x i8> %v
164 define <vscale x 4 x i8> @vrem_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
165 ; CHECK-LABEL: vrem_vx_nxv4i8_unmasked:
167 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
168 ; CHECK-NEXT: vrem.vx v8, v8, a0
170 %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
171 %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
172 %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
173 ret <vscale x 4 x i8> %v
; vp.srem on <vscale x 8 x i8> (e8/m1): vv and vx forms, masked and unmasked.
176 declare <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
178 define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
179 ; CHECK-LABEL: vrem_vv_nxv8i8:
181 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
182 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
184 %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
185 ret <vscale x 8 x i8> %v
188 define <vscale x 8 x i8> @vrem_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
189 ; CHECK-LABEL: vrem_vv_nxv8i8_unmasked:
191 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
192 ; CHECK-NEXT: vrem.vv v8, v8, v9
194 %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
195 ret <vscale x 8 x i8> %v
198 define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
199 ; CHECK-LABEL: vrem_vx_nxv8i8:
201 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
202 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
204 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
205 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
206 %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
207 ret <vscale x 8 x i8> %v
210 define <vscale x 8 x i8> @vrem_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
211 ; CHECK-LABEL: vrem_vx_nxv8i8_unmasked:
213 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
214 ; CHECK-NEXT: vrem.vx v8, v8, a0
216 %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
217 %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
218 %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
219 ret <vscale x 8 x i8> %v
; vp.srem on <vscale x 16 x i8> (e8/m2): vv and vx forms, masked and unmasked.
; Register-group operands start at even registers (v8, v10).
222 declare <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
224 define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
225 ; CHECK-LABEL: vrem_vv_nxv16i8:
227 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
228 ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
230 %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
231 ret <vscale x 16 x i8> %v
234 define <vscale x 16 x i8> @vrem_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
235 ; CHECK-LABEL: vrem_vv_nxv16i8_unmasked:
237 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
238 ; CHECK-NEXT: vrem.vv v8, v8, v10
240 %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
241 ret <vscale x 16 x i8> %v
244 define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
245 ; CHECK-LABEL: vrem_vx_nxv16i8:
247 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
248 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
250 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
251 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
252 %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
253 ret <vscale x 16 x i8> %v
256 define <vscale x 16 x i8> @vrem_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
257 ; CHECK-LABEL: vrem_vx_nxv16i8_unmasked:
259 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
260 ; CHECK-NEXT: vrem.vx v8, v8, a0
262 %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
263 %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
264 %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
265 ret <vscale x 16 x i8> %v
; vp.srem on <vscale x 32 x i8> (e8/m4): vv and vx forms, masked and unmasked.
268 declare <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
270 define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
271 ; CHECK-LABEL: vrem_vv_nxv32i8:
273 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
274 ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
276 %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
277 ret <vscale x 32 x i8> %v
280 define <vscale x 32 x i8> @vrem_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
281 ; CHECK-LABEL: vrem_vv_nxv32i8_unmasked:
283 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
284 ; CHECK-NEXT: vrem.vv v8, v8, v12
286 %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
287 ret <vscale x 32 x i8> %v
290 define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
291 ; CHECK-LABEL: vrem_vx_nxv32i8:
293 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
294 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
296 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
297 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
298 %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
299 ret <vscale x 32 x i8> %v
302 define <vscale x 32 x i8> @vrem_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
303 ; CHECK-LABEL: vrem_vx_nxv32i8_unmasked:
305 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
306 ; CHECK-NEXT: vrem.vx v8, v8, a0
308 %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
309 %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
310 %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
311 ret <vscale x 32 x i8> %v
; vp.srem on <vscale x 64 x i8> (e8/m8, the largest LMUL): vv and vx forms,
; masked and unmasked.
314 declare <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
316 define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
317 ; CHECK-LABEL: vrem_vv_nxv64i8:
319 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
320 ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
322 %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
323 ret <vscale x 64 x i8> %v
326 define <vscale x 64 x i8> @vrem_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
327 ; CHECK-LABEL: vrem_vv_nxv64i8_unmasked:
329 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
330 ; CHECK-NEXT: vrem.vv v8, v8, v16
332 %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
333 ret <vscale x 64 x i8> %v
336 define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
337 ; CHECK-LABEL: vrem_vx_nxv64i8:
339 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
340 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
342 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
343 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
344 %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
345 ret <vscale x 64 x i8> %v
348 define <vscale x 64 x i8> @vrem_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
349 ; CHECK-LABEL: vrem_vx_nxv64i8_unmasked:
351 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
352 ; CHECK-NEXT: vrem.vx v8, v8, a0
354 %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
355 %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
356 %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
357 ret <vscale x 64 x i8> %v
; vp.srem on <vscale x 1 x i16> (e16/mf4): vv and vx forms, masked and unmasked.
360 declare <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
362 define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
363 ; CHECK-LABEL: vrem_vv_nxv1i16:
365 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
366 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
368 %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
369 ret <vscale x 1 x i16> %v
372 define <vscale x 1 x i16> @vrem_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
373 ; CHECK-LABEL: vrem_vv_nxv1i16_unmasked:
375 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
376 ; CHECK-NEXT: vrem.vv v8, v8, v9
378 %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
379 ret <vscale x 1 x i16> %v
382 define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
383 ; CHECK-LABEL: vrem_vx_nxv1i16:
385 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
386 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
388 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
389 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
390 %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
391 ret <vscale x 1 x i16> %v
394 define <vscale x 1 x i16> @vrem_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
395 ; CHECK-LABEL: vrem_vx_nxv1i16_unmasked:
397 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
398 ; CHECK-NEXT: vrem.vx v8, v8, a0
400 %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
401 %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
402 %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
403 ret <vscale x 1 x i16> %v
; vp.srem on <vscale x 2 x i16> (e16/mf2): vv and vx forms, masked and unmasked.
406 declare <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
408 define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
409 ; CHECK-LABEL: vrem_vv_nxv2i16:
411 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
412 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
414 %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
415 ret <vscale x 2 x i16> %v
418 define <vscale x 2 x i16> @vrem_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
419 ; CHECK-LABEL: vrem_vv_nxv2i16_unmasked:
421 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
422 ; CHECK-NEXT: vrem.vv v8, v8, v9
424 %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
425 ret <vscale x 2 x i16> %v
428 define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
429 ; CHECK-LABEL: vrem_vx_nxv2i16:
431 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
432 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
434 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
435 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
436 %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
437 ret <vscale x 2 x i16> %v
440 define <vscale x 2 x i16> @vrem_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
441 ; CHECK-LABEL: vrem_vx_nxv2i16_unmasked:
443 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
444 ; CHECK-NEXT: vrem.vx v8, v8, a0
446 %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
447 %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
448 %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
449 ret <vscale x 2 x i16> %v
; vp.srem on <vscale x 4 x i16> (e16/m1): vv and vx forms, masked and unmasked.
452 declare <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
454 define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
455 ; CHECK-LABEL: vrem_vv_nxv4i16:
457 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
458 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
460 %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
461 ret <vscale x 4 x i16> %v
464 define <vscale x 4 x i16> @vrem_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
465 ; CHECK-LABEL: vrem_vv_nxv4i16_unmasked:
467 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
468 ; CHECK-NEXT: vrem.vv v8, v8, v9
470 %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
471 ret <vscale x 4 x i16> %v
474 define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
475 ; CHECK-LABEL: vrem_vx_nxv4i16:
477 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
478 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
480 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
481 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
482 %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
483 ret <vscale x 4 x i16> %v
486 define <vscale x 4 x i16> @vrem_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
487 ; CHECK-LABEL: vrem_vx_nxv4i16_unmasked:
489 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
490 ; CHECK-NEXT: vrem.vx v8, v8, a0
492 %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
493 %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
494 %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
495 ret <vscale x 4 x i16> %v
; vp.srem on <vscale x 8 x i16> (e16/m2): vv and vx forms, masked and unmasked.
498 declare <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
500 define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
501 ; CHECK-LABEL: vrem_vv_nxv8i16:
503 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
504 ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
506 %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
507 ret <vscale x 8 x i16> %v
510 define <vscale x 8 x i16> @vrem_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
511 ; CHECK-LABEL: vrem_vv_nxv8i16_unmasked:
513 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
514 ; CHECK-NEXT: vrem.vv v8, v8, v10
516 %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
517 ret <vscale x 8 x i16> %v
520 define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
521 ; CHECK-LABEL: vrem_vx_nxv8i16:
523 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
524 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
526 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
527 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
528 %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
529 ret <vscale x 8 x i16> %v
532 define <vscale x 8 x i16> @vrem_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
533 ; CHECK-LABEL: vrem_vx_nxv8i16_unmasked:
535 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
536 ; CHECK-NEXT: vrem.vx v8, v8, a0
538 %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
539 %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
540 %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
541 ret <vscale x 8 x i16> %v
; vp.srem on <vscale x 16 x i16> (e16/m4): vv and vx forms, masked and unmasked.
544 declare <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
546 define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
547 ; CHECK-LABEL: vrem_vv_nxv16i16:
549 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
550 ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
552 %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
553 ret <vscale x 16 x i16> %v
556 define <vscale x 16 x i16> @vrem_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
557 ; CHECK-LABEL: vrem_vv_nxv16i16_unmasked:
559 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
560 ; CHECK-NEXT: vrem.vv v8, v8, v12
562 %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
563 ret <vscale x 16 x i16> %v
566 define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
567 ; CHECK-LABEL: vrem_vx_nxv16i16:
569 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
570 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
572 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
573 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
574 %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
575 ret <vscale x 16 x i16> %v
578 define <vscale x 16 x i16> @vrem_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
579 ; CHECK-LABEL: vrem_vx_nxv16i16_unmasked:
581 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
582 ; CHECK-NEXT: vrem.vx v8, v8, a0
584 %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
585 %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
586 %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
587 ret <vscale x 16 x i16> %v
; vp.srem on <vscale x 32 x i16> (e16/m8): vv and vx forms, masked and unmasked.
590 declare <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
592 define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
593 ; CHECK-LABEL: vrem_vv_nxv32i16:
595 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
596 ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
598 %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
599 ret <vscale x 32 x i16> %v
602 define <vscale x 32 x i16> @vrem_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
603 ; CHECK-LABEL: vrem_vv_nxv32i16_unmasked:
605 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
606 ; CHECK-NEXT: vrem.vv v8, v8, v16
608 %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
609 ret <vscale x 32 x i16> %v
612 define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
613 ; CHECK-LABEL: vrem_vx_nxv32i16:
615 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
616 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
618 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
619 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
620 %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
621 ret <vscale x 32 x i16> %v
624 define <vscale x 32 x i16> @vrem_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
625 ; CHECK-LABEL: vrem_vx_nxv32i16_unmasked:
627 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
628 ; CHECK-NEXT: vrem.vx v8, v8, a0
630 %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
631 %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
632 %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
633 ret <vscale x 32 x i16> %v
; vp.srem on <vscale x 1 x i32> (e32/mf2): vv and vx forms, masked and unmasked.
636 declare <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
638 define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
639 ; CHECK-LABEL: vrem_vv_nxv1i32:
641 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
642 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
644 %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
645 ret <vscale x 1 x i32> %v
648 define <vscale x 1 x i32> @vrem_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
649 ; CHECK-LABEL: vrem_vv_nxv1i32_unmasked:
651 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
652 ; CHECK-NEXT: vrem.vv v8, v8, v9
654 %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
655 ret <vscale x 1 x i32> %v
658 define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
659 ; CHECK-LABEL: vrem_vx_nxv1i32:
661 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
662 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
664 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
665 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
666 %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
667 ret <vscale x 1 x i32> %v
670 define <vscale x 1 x i32> @vrem_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
671 ; CHECK-LABEL: vrem_vx_nxv1i32_unmasked:
673 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
674 ; CHECK-NEXT: vrem.vx v8, v8, a0
676 %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
677 %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
678 %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
679 ret <vscale x 1 x i32> %v
682 declare <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
684 define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
685 ; CHECK-LABEL: vrem_vv_nxv2i32:
687 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
688 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
690 %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
691 ret <vscale x 2 x i32> %v
694 define <vscale x 2 x i32> @vrem_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
695 ; CHECK-LABEL: vrem_vv_nxv2i32_unmasked:
697 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
698 ; CHECK-NEXT: vrem.vv v8, v8, v9
700 %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
701 ret <vscale x 2 x i32> %v
704 define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
705 ; CHECK-LABEL: vrem_vx_nxv2i32:
707 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
708 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
710 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
711 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
712 %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
713 ret <vscale x 2 x i32> %v
716 define <vscale x 2 x i32> @vrem_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
717 ; CHECK-LABEL: vrem_vx_nxv2i32_unmasked:
719 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
720 ; CHECK-NEXT: vrem.vx v8, v8, a0
722 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
723 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
724 %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
725 ret <vscale x 2 x i32> %v
728 declare <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
730 define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
731 ; CHECK-LABEL: vrem_vv_nxv4i32:
733 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
734 ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
736 %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
737 ret <vscale x 4 x i32> %v
740 define <vscale x 4 x i32> @vrem_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
741 ; CHECK-LABEL: vrem_vv_nxv4i32_unmasked:
743 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
744 ; CHECK-NEXT: vrem.vv v8, v8, v10
746 %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
747 ret <vscale x 4 x i32> %v
750 define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
751 ; CHECK-LABEL: vrem_vx_nxv4i32:
753 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
754 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
756 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
757 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
758 %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
759 ret <vscale x 4 x i32> %v
762 define <vscale x 4 x i32> @vrem_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
763 ; CHECK-LABEL: vrem_vx_nxv4i32_unmasked:
765 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
766 ; CHECK-NEXT: vrem.vx v8, v8, a0
768 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
769 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
770 %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
771 ret <vscale x 4 x i32> %v
774 declare <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
776 define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
777 ; CHECK-LABEL: vrem_vv_nxv8i32:
779 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
780 ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
782 %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
783 ret <vscale x 8 x i32> %v
786 define <vscale x 8 x i32> @vrem_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
787 ; CHECK-LABEL: vrem_vv_nxv8i32_unmasked:
789 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
790 ; CHECK-NEXT: vrem.vv v8, v8, v12
792 %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
793 ret <vscale x 8 x i32> %v
796 define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
797 ; CHECK-LABEL: vrem_vx_nxv8i32:
799 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
800 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
802 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
803 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
804 %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
805 ret <vscale x 8 x i32> %v
808 define <vscale x 8 x i32> @vrem_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
809 ; CHECK-LABEL: vrem_vx_nxv8i32_unmasked:
811 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
812 ; CHECK-NEXT: vrem.vx v8, v8, a0
814 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
815 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
816 %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
817 ret <vscale x 8 x i32> %v
820 declare <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
822 define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
823 ; CHECK-LABEL: vrem_vv_nxv16i32:
825 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
826 ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
828 %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
829 ret <vscale x 16 x i32> %v
832 define <vscale x 16 x i32> @vrem_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
833 ; CHECK-LABEL: vrem_vv_nxv16i32_unmasked:
835 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
836 ; CHECK-NEXT: vrem.vv v8, v8, v16
838 %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
839 ret <vscale x 16 x i32> %v
842 define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
843 ; CHECK-LABEL: vrem_vx_nxv16i32:
845 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
846 ; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
848 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
849 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
850 %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
851 ret <vscale x 16 x i32> %v
854 define <vscale x 16 x i32> @vrem_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
855 ; CHECK-LABEL: vrem_vx_nxv16i32_unmasked:
857 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
858 ; CHECK-NEXT: vrem.vx v8, v8, a0
860 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
861 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
862 %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
863 ret <vscale x 16 x i32> %v
866 declare <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
868 define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
869 ; CHECK-LABEL: vrem_vv_nxv1i64:
871 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
872 ; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
874 %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
875 ret <vscale x 1 x i64> %v
878 define <vscale x 1 x i64> @vrem_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
879 ; CHECK-LABEL: vrem_vv_nxv1i64_unmasked:
881 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
882 ; CHECK-NEXT: vrem.vv v8, v8, v9
884 %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
885 ret <vscale x 1 x i64> %v
888 define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
889 ; RV32-LABEL: vrem_vx_nxv1i64:
891 ; RV32-NEXT: addi sp, sp, -16
892 ; RV32-NEXT: .cfi_def_cfa_offset 16
893 ; RV32-NEXT: sw a0, 8(sp)
894 ; RV32-NEXT: sw a1, 12(sp)
895 ; RV32-NEXT: addi a0, sp, 8
896 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
897 ; RV32-NEXT: vlse64.v v9, (a0), zero
898 ; RV32-NEXT: vrem.vv v8, v8, v9, v0.t
899 ; RV32-NEXT: addi sp, sp, 16
900 ; RV32-NEXT: .cfi_def_cfa_offset 0
903 ; RV64-LABEL: vrem_vx_nxv1i64:
905 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
906 ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
908 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
909 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
910 %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
911 ret <vscale x 1 x i64> %v
914 define <vscale x 1 x i64> @vrem_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
915 ; RV32-LABEL: vrem_vx_nxv1i64_unmasked:
917 ; RV32-NEXT: addi sp, sp, -16
918 ; RV32-NEXT: .cfi_def_cfa_offset 16
919 ; RV32-NEXT: sw a0, 8(sp)
920 ; RV32-NEXT: sw a1, 12(sp)
921 ; RV32-NEXT: addi a0, sp, 8
922 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
923 ; RV32-NEXT: vlse64.v v9, (a0), zero
924 ; RV32-NEXT: vrem.vv v8, v8, v9
925 ; RV32-NEXT: addi sp, sp, 16
926 ; RV32-NEXT: .cfi_def_cfa_offset 0
929 ; RV64-LABEL: vrem_vx_nxv1i64_unmasked:
931 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
932 ; RV64-NEXT: vrem.vx v8, v8, a0
934 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
935 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
936 %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
937 ret <vscale x 1 x i64> %v
940 declare <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
942 define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
943 ; CHECK-LABEL: vrem_vv_nxv2i64:
945 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
946 ; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
948 %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
949 ret <vscale x 2 x i64> %v
952 define <vscale x 2 x i64> @vrem_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
953 ; CHECK-LABEL: vrem_vv_nxv2i64_unmasked:
955 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
956 ; CHECK-NEXT: vrem.vv v8, v8, v10
958 %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
959 ret <vscale x 2 x i64> %v
962 define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
963 ; RV32-LABEL: vrem_vx_nxv2i64:
965 ; RV32-NEXT: addi sp, sp, -16
966 ; RV32-NEXT: .cfi_def_cfa_offset 16
967 ; RV32-NEXT: sw a0, 8(sp)
968 ; RV32-NEXT: sw a1, 12(sp)
969 ; RV32-NEXT: addi a0, sp, 8
970 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
971 ; RV32-NEXT: vlse64.v v10, (a0), zero
972 ; RV32-NEXT: vrem.vv v8, v8, v10, v0.t
973 ; RV32-NEXT: addi sp, sp, 16
974 ; RV32-NEXT: .cfi_def_cfa_offset 0
977 ; RV64-LABEL: vrem_vx_nxv2i64:
979 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
980 ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
982 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
983 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
984 %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
985 ret <vscale x 2 x i64> %v
988 define <vscale x 2 x i64> @vrem_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
989 ; RV32-LABEL: vrem_vx_nxv2i64_unmasked:
991 ; RV32-NEXT: addi sp, sp, -16
992 ; RV32-NEXT: .cfi_def_cfa_offset 16
993 ; RV32-NEXT: sw a0, 8(sp)
994 ; RV32-NEXT: sw a1, 12(sp)
995 ; RV32-NEXT: addi a0, sp, 8
996 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
997 ; RV32-NEXT: vlse64.v v10, (a0), zero
998 ; RV32-NEXT: vrem.vv v8, v8, v10
999 ; RV32-NEXT: addi sp, sp, 16
1000 ; RV32-NEXT: .cfi_def_cfa_offset 0
1003 ; RV64-LABEL: vrem_vx_nxv2i64_unmasked:
1005 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1006 ; RV64-NEXT: vrem.vx v8, v8, a0
1008 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1009 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1010 %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1011 ret <vscale x 2 x i64> %v
1014 declare <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
1016 define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1017 ; CHECK-LABEL: vrem_vv_nxv4i64:
1019 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1020 ; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
1022 %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1023 ret <vscale x 4 x i64> %v
1026 define <vscale x 4 x i64> @vrem_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1027 ; CHECK-LABEL: vrem_vv_nxv4i64_unmasked:
1029 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1030 ; CHECK-NEXT: vrem.vv v8, v8, v12
1032 %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1033 ret <vscale x 4 x i64> %v
1036 define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1037 ; RV32-LABEL: vrem_vx_nxv4i64:
1039 ; RV32-NEXT: addi sp, sp, -16
1040 ; RV32-NEXT: .cfi_def_cfa_offset 16
1041 ; RV32-NEXT: sw a0, 8(sp)
1042 ; RV32-NEXT: sw a1, 12(sp)
1043 ; RV32-NEXT: addi a0, sp, 8
1044 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1045 ; RV32-NEXT: vlse64.v v12, (a0), zero
1046 ; RV32-NEXT: vrem.vv v8, v8, v12, v0.t
1047 ; RV32-NEXT: addi sp, sp, 16
1048 ; RV32-NEXT: .cfi_def_cfa_offset 0
1051 ; RV64-LABEL: vrem_vx_nxv4i64:
1053 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1054 ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
1056 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1057 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1058 %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1059 ret <vscale x 4 x i64> %v
1062 define <vscale x 4 x i64> @vrem_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1063 ; RV32-LABEL: vrem_vx_nxv4i64_unmasked:
1065 ; RV32-NEXT: addi sp, sp, -16
1066 ; RV32-NEXT: .cfi_def_cfa_offset 16
1067 ; RV32-NEXT: sw a0, 8(sp)
1068 ; RV32-NEXT: sw a1, 12(sp)
1069 ; RV32-NEXT: addi a0, sp, 8
1070 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1071 ; RV32-NEXT: vlse64.v v12, (a0), zero
1072 ; RV32-NEXT: vrem.vv v8, v8, v12
1073 ; RV32-NEXT: addi sp, sp, 16
1074 ; RV32-NEXT: .cfi_def_cfa_offset 0
1077 ; RV64-LABEL: vrem_vx_nxv4i64_unmasked:
1079 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1080 ; RV64-NEXT: vrem.vx v8, v8, a0
1082 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1083 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1084 %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1085 ret <vscale x 4 x i64> %v
1088 declare <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1090 define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1091 ; CHECK-LABEL: vrem_vv_nxv8i64:
1093 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1094 ; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
1096 %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1097 ret <vscale x 8 x i64> %v
1100 define <vscale x 8 x i64> @vrem_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1101 ; CHECK-LABEL: vrem_vv_nxv8i64_unmasked:
1103 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1104 ; CHECK-NEXT: vrem.vv v8, v8, v16
1106 %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1107 ret <vscale x 8 x i64> %v
1110 define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1111 ; RV32-LABEL: vrem_vx_nxv8i64:
1113 ; RV32-NEXT: addi sp, sp, -16
1114 ; RV32-NEXT: .cfi_def_cfa_offset 16
1115 ; RV32-NEXT: sw a0, 8(sp)
1116 ; RV32-NEXT: sw a1, 12(sp)
1117 ; RV32-NEXT: addi a0, sp, 8
1118 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1119 ; RV32-NEXT: vlse64.v v16, (a0), zero
1120 ; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
1121 ; RV32-NEXT: addi sp, sp, 16
1122 ; RV32-NEXT: .cfi_def_cfa_offset 0
1125 ; RV64-LABEL: vrem_vx_nxv8i64:
1127 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1128 ; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
1130 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1131 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1132 %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1133 ret <vscale x 8 x i64> %v
1136 define <vscale x 8 x i64> @vrem_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1137 ; RV32-LABEL: vrem_vx_nxv8i64_unmasked:
1139 ; RV32-NEXT: addi sp, sp, -16
1140 ; RV32-NEXT: .cfi_def_cfa_offset 16
1141 ; RV32-NEXT: sw a0, 8(sp)
1142 ; RV32-NEXT: sw a1, 12(sp)
1143 ; RV32-NEXT: addi a0, sp, 8
1144 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1145 ; RV32-NEXT: vlse64.v v16, (a0), zero
1146 ; RV32-NEXT: vrem.vv v8, v8, v16
1147 ; RV32-NEXT: addi sp, sp, 16
1148 ; RV32-NEXT: .cfi_def_cfa_offset 0
1151 ; RV64-LABEL: vrem_vx_nxv8i64_unmasked:
1153 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1154 ; RV64-NEXT: vrem.vx v8, v8, a0
1156 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1157 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1158 %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1159 ret <vscale x 8 x i64> %v