1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
4 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 8 x i7> @llvm.vp.urem.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

; i7 is an illegal element type: both operands must be masked to 7 bits
; (vand.vx with 127) before the e8 vremu so the unsigned remainder is exact.
define <vscale x 8 x i7> @vremu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 127
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.urem.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}
declare <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vremu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vremu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vremu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
declare <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vremu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vremu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vremu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vremu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
declare <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)

; Non-power-of-2 element count: nxv3i8 is widened to the mf2 container.
define <vscale x 3 x i8> @vremu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv3i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
  ret <vscale x 3 x i8> %v
}
declare <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vremu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vremu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vremu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
declare <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vremu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vremu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vremu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vremu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
declare <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vremu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vremu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vremu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vremu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
declare <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vremu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vremu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vremu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vremu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
declare <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vremu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vremu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vremu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
declare <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vremu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vremu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vremu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
declare <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vremu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vremu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vremu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
declare <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vremu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vremu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vremu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
declare <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vremu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vremu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vremu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
declare <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vremu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vremu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vremu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
declare <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vremu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vremu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vremu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
declare <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vremu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vremu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vremu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vremu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vremu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}
683 declare <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
; Masked vector-vector VP urem, nxv2i32 (e32, m1).
685 define <vscale x 2 x i32> @vremu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
686 ; CHECK-LABEL: vremu_vv_nxv2i32:
688 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
689 ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
691 %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
692 ret <vscale x 2 x i32> %v
; Unmasked vector-vector VP urem, nxv2i32: all-ones mask is dropped.
695 define <vscale x 2 x i32> @vremu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
696 ; CHECK-LABEL: vremu_vv_nxv2i32_unmasked:
698 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
699 ; CHECK-NEXT: vremu.vv v8, v8, v9
701 %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
702 ret <vscale x 2 x i32> %v
; Masked vector-scalar VP urem, nxv2i32: splat folds to vremu.vx.
705 define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
706 ; CHECK-LABEL: vremu_vx_nxv2i32:
708 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
709 ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
711 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
712 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
713 %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
714 ret <vscale x 2 x i32> %v
; Unmasked vector-scalar VP urem, nxv2i32.
717 define <vscale x 2 x i32> @vremu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
718 ; CHECK-LABEL: vremu_vx_nxv2i32_unmasked:
720 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
721 ; CHECK-NEXT: vremu.vx v8, v8, a0
723 %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
724 %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
725 %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
726 ret <vscale x 2 x i32> %v
729 declare <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
; Masked vector-vector VP urem, nxv4i32 (e32, m2; second operand in v10).
731 define <vscale x 4 x i32> @vremu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
732 ; CHECK-LABEL: vremu_vv_nxv4i32:
734 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
735 ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
737 %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
738 ret <vscale x 4 x i32> %v
; Unmasked vector-vector VP urem, nxv4i32.
741 define <vscale x 4 x i32> @vremu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
742 ; CHECK-LABEL: vremu_vv_nxv4i32_unmasked:
744 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
745 ; CHECK-NEXT: vremu.vv v8, v8, v10
747 %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
748 ret <vscale x 4 x i32> %v
; Masked vector-scalar VP urem, nxv4i32: splat folds to vremu.vx.
751 define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
752 ; CHECK-LABEL: vremu_vx_nxv4i32:
754 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
755 ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
757 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
758 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
759 %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
760 ret <vscale x 4 x i32> %v
; Unmasked vector-scalar VP urem, nxv4i32.
763 define <vscale x 4 x i32> @vremu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
764 ; CHECK-LABEL: vremu_vx_nxv4i32_unmasked:
766 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
767 ; CHECK-NEXT: vremu.vx v8, v8, a0
769 %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
770 %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
771 %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
772 ret <vscale x 4 x i32> %v
775 declare <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
; Masked vector-vector VP urem, nxv8i32 (e32, m4; second operand in v12).
777 define <vscale x 8 x i32> @vremu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
778 ; CHECK-LABEL: vremu_vv_nxv8i32:
780 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
781 ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
783 %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
784 ret <vscale x 8 x i32> %v
; Unmasked vector-vector VP urem, nxv8i32.
787 define <vscale x 8 x i32> @vremu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
788 ; CHECK-LABEL: vremu_vv_nxv8i32_unmasked:
790 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
791 ; CHECK-NEXT: vremu.vv v8, v8, v12
793 %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
794 ret <vscale x 8 x i32> %v
; Masked vector-scalar VP urem, nxv8i32: splat folds to vremu.vx.
797 define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
798 ; CHECK-LABEL: vremu_vx_nxv8i32:
800 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
801 ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
803 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
804 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
805 %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
806 ret <vscale x 8 x i32> %v
; Unmasked vector-scalar VP urem, nxv8i32.
809 define <vscale x 8 x i32> @vremu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
810 ; CHECK-LABEL: vremu_vx_nxv8i32_unmasked:
812 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
813 ; CHECK-NEXT: vremu.vx v8, v8, a0
815 %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
816 %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
817 %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
818 ret <vscale x 8 x i32> %v
821 declare <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
; Masked vector-vector VP urem, nxv16i32 (e32, m8; second operand in v16).
823 define <vscale x 16 x i32> @vremu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
824 ; CHECK-LABEL: vremu_vv_nxv16i32:
826 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
827 ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
829 %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
830 ret <vscale x 16 x i32> %v
; Unmasked vector-vector VP urem, nxv16i32.
833 define <vscale x 16 x i32> @vremu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
834 ; CHECK-LABEL: vremu_vv_nxv16i32_unmasked:
836 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
837 ; CHECK-NEXT: vremu.vv v8, v8, v16
839 %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
840 ret <vscale x 16 x i32> %v
; Masked vector-scalar VP urem, nxv16i32: splat folds to vremu.vx.
843 define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
844 ; CHECK-LABEL: vremu_vx_nxv16i32:
846 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
847 ; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
849 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
850 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
851 %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
852 ret <vscale x 16 x i32> %v
; Unmasked vector-scalar VP urem, nxv16i32.
855 define <vscale x 16 x i32> @vremu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
856 ; CHECK-LABEL: vremu_vx_nxv16i32_unmasked:
858 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
859 ; CHECK-NEXT: vremu.vx v8, v8, a0
861 %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
862 %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
863 %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
864 ret <vscale x 16 x i32> %v
867 declare <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
; Masked vector-vector VP urem, nxv1i64 (e64, m1): same codegen on RV32/RV64.
869 define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
870 ; CHECK-LABEL: vremu_vv_nxv1i64:
872 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
873 ; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
875 %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
876 ret <vscale x 1 x i64> %v
; Unmasked vector-vector VP urem, nxv1i64.
879 define <vscale x 1 x i64> @vremu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
880 ; CHECK-LABEL: vremu_vv_nxv1i64_unmasked:
882 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
883 ; CHECK-NEXT: vremu.vv v8, v8, v9
885 %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
886 ret <vscale x 1 x i64> %v
; Masked vector-scalar VP urem, nxv1i64. Codegen diverges by target:
; - RV32: the i64 scalar arrives as a register pair (a0/a1), so it is spilled
;   to the stack and splatted via a strided load (vlse64.v, stride zero), then
;   the vector-vector form is used.
; - RV64: the scalar fits in a0, so the splat folds directly into vremu.vx.
889 define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
890 ; RV32-LABEL: vremu_vx_nxv1i64:
892 ; RV32-NEXT: addi sp, sp, -16
893 ; RV32-NEXT: .cfi_def_cfa_offset 16
894 ; RV32-NEXT: sw a1, 12(sp)
895 ; RV32-NEXT: sw a0, 8(sp)
896 ; RV32-NEXT: addi a0, sp, 8
897 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
898 ; RV32-NEXT: vlse64.v v9, (a0), zero
899 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
900 ; RV32-NEXT: vremu.vv v8, v8, v9, v0.t
901 ; RV32-NEXT: addi sp, sp, 16
904 ; RV64-LABEL: vremu_vx_nxv1i64:
906 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
907 ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
909 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
910 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
911 %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
912 ret <vscale x 1 x i64> %v
; Unmasked vector-scalar VP urem, nxv1i64: RV32 splats the i64 scalar through
; the stack (vlse64.v) and uses vremu.vv; RV64 uses vremu.vx directly.
915 define <vscale x 1 x i64> @vremu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
916 ; RV32-LABEL: vremu_vx_nxv1i64_unmasked:
918 ; RV32-NEXT: addi sp, sp, -16
919 ; RV32-NEXT: .cfi_def_cfa_offset 16
920 ; RV32-NEXT: sw a1, 12(sp)
921 ; RV32-NEXT: sw a0, 8(sp)
922 ; RV32-NEXT: addi a0, sp, 8
923 ; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
924 ; RV32-NEXT: vlse64.v v9, (a0), zero
925 ; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
926 ; RV32-NEXT: vremu.vv v8, v8, v9
927 ; RV32-NEXT: addi sp, sp, 16
930 ; RV64-LABEL: vremu_vx_nxv1i64_unmasked:
932 ; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
933 ; RV64-NEXT: vremu.vx v8, v8, a0
935 %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
936 %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
937 %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
938 ret <vscale x 1 x i64> %v
941 declare <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
; Masked vector-vector VP urem, nxv2i64 (e64, m2).
943 define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
944 ; CHECK-LABEL: vremu_vv_nxv2i64:
946 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
947 ; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
949 %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
950 ret <vscale x 2 x i64> %v
; Unmasked vector-vector VP urem, nxv2i64.
953 define <vscale x 2 x i64> @vremu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
954 ; CHECK-LABEL: vremu_vv_nxv2i64_unmasked:
956 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
957 ; CHECK-NEXT: vremu.vv v8, v8, v10
959 %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
960 ret <vscale x 2 x i64> %v
; Masked vector-scalar VP urem, nxv2i64: RV32 splats the i64 scalar via the
; stack + vlse64.v into v10 and uses vremu.vv; RV64 folds to vremu.vx.
963 define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
964 ; RV32-LABEL: vremu_vx_nxv2i64:
966 ; RV32-NEXT: addi sp, sp, -16
967 ; RV32-NEXT: .cfi_def_cfa_offset 16
968 ; RV32-NEXT: sw a1, 12(sp)
969 ; RV32-NEXT: sw a0, 8(sp)
970 ; RV32-NEXT: addi a0, sp, 8
971 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
972 ; RV32-NEXT: vlse64.v v10, (a0), zero
973 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
974 ; RV32-NEXT: vremu.vv v8, v8, v10, v0.t
975 ; RV32-NEXT: addi sp, sp, 16
978 ; RV64-LABEL: vremu_vx_nxv2i64:
980 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
981 ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
983 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
984 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
985 %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
986 ret <vscale x 2 x i64> %v
; Unmasked vector-scalar VP urem, nxv2i64: stack splat on RV32, vremu.vx on RV64.
989 define <vscale x 2 x i64> @vremu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
990 ; RV32-LABEL: vremu_vx_nxv2i64_unmasked:
992 ; RV32-NEXT: addi sp, sp, -16
993 ; RV32-NEXT: .cfi_def_cfa_offset 16
994 ; RV32-NEXT: sw a1, 12(sp)
995 ; RV32-NEXT: sw a0, 8(sp)
996 ; RV32-NEXT: addi a0, sp, 8
997 ; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
998 ; RV32-NEXT: vlse64.v v10, (a0), zero
999 ; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
1000 ; RV32-NEXT: vremu.vv v8, v8, v10
1001 ; RV32-NEXT: addi sp, sp, 16
1004 ; RV64-LABEL: vremu_vx_nxv2i64_unmasked:
1006 ; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
1007 ; RV64-NEXT: vremu.vx v8, v8, a0
1009 %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
1010 %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1011 %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
1012 ret <vscale x 2 x i64> %v
1015 declare <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
; Masked vector-vector VP urem, nxv4i64 (e64, m4).
1017 define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1018 ; CHECK-LABEL: vremu_vv_nxv4i64:
1020 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1021 ; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
1023 %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
1024 ret <vscale x 4 x i64> %v
; Unmasked vector-vector VP urem, nxv4i64.
1027 define <vscale x 4 x i64> @vremu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
1028 ; CHECK-LABEL: vremu_vv_nxv4i64_unmasked:
1030 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1031 ; CHECK-NEXT: vremu.vv v8, v8, v12
1033 %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1034 ret <vscale x 4 x i64> %v
; Masked vector-scalar VP urem, nxv4i64: RV32 splats the i64 scalar via the
; stack + vlse64.v into v12 and uses vremu.vv; RV64 folds to vremu.vx.
1037 define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1038 ; RV32-LABEL: vremu_vx_nxv4i64:
1040 ; RV32-NEXT: addi sp, sp, -16
1041 ; RV32-NEXT: .cfi_def_cfa_offset 16
1042 ; RV32-NEXT: sw a1, 12(sp)
1043 ; RV32-NEXT: sw a0, 8(sp)
1044 ; RV32-NEXT: addi a0, sp, 8
1045 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1046 ; RV32-NEXT: vlse64.v v12, (a0), zero
1047 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1048 ; RV32-NEXT: vremu.vv v8, v8, v12, v0.t
1049 ; RV32-NEXT: addi sp, sp, 16
1052 ; RV64-LABEL: vremu_vx_nxv4i64:
1054 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1055 ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
1057 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1058 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1059 %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
1060 ret <vscale x 4 x i64> %v
; Unmasked vector-scalar VP urem, nxv4i64: stack splat on RV32, vremu.vx on RV64.
1063 define <vscale x 4 x i64> @vremu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1064 ; RV32-LABEL: vremu_vx_nxv4i64_unmasked:
1066 ; RV32-NEXT: addi sp, sp, -16
1067 ; RV32-NEXT: .cfi_def_cfa_offset 16
1068 ; RV32-NEXT: sw a1, 12(sp)
1069 ; RV32-NEXT: sw a0, 8(sp)
1070 ; RV32-NEXT: addi a0, sp, 8
1071 ; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
1072 ; RV32-NEXT: vlse64.v v12, (a0), zero
1073 ; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
1074 ; RV32-NEXT: vremu.vv v8, v8, v12
1075 ; RV32-NEXT: addi sp, sp, 16
1078 ; RV64-LABEL: vremu_vx_nxv4i64_unmasked:
1080 ; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
1081 ; RV64-NEXT: vremu.vx v8, v8, a0
1083 %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1084 %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1085 %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1086 ret <vscale x 4 x i64> %v
1089 declare <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
; Masked vector-vector VP urem, nxv8i64 (e64, m8).
1091 define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1092 ; CHECK-LABEL: vremu_vv_nxv8i64:
1094 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1095 ; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
1097 %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
1098 ret <vscale x 8 x i64> %v
; Unmasked vector-vector VP urem, nxv8i64.
1101 define <vscale x 8 x i64> @vremu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
1102 ; CHECK-LABEL: vremu_vv_nxv8i64_unmasked:
1104 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1105 ; CHECK-NEXT: vremu.vv v8, v8, v16
1107 %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1108 ret <vscale x 8 x i64> %v
; Masked vector-scalar VP urem, nxv8i64: RV32 splats the i64 scalar via the
; stack + vlse64.v into v16 and uses vremu.vv; RV64 folds to vremu.vx.
1111 define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1112 ; RV32-LABEL: vremu_vx_nxv8i64:
1114 ; RV32-NEXT: addi sp, sp, -16
1115 ; RV32-NEXT: .cfi_def_cfa_offset 16
1116 ; RV32-NEXT: sw a1, 12(sp)
1117 ; RV32-NEXT: sw a0, 8(sp)
1118 ; RV32-NEXT: addi a0, sp, 8
1119 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1120 ; RV32-NEXT: vlse64.v v16, (a0), zero
1121 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1122 ; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
1123 ; RV32-NEXT: addi sp, sp, 16
1126 ; RV64-LABEL: vremu_vx_nxv8i64:
1128 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1129 ; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
1131 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1132 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1133 %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
1134 ret <vscale x 8 x i64> %v
; Unmasked vector-scalar VP urem, nxv8i64: stack splat on RV32, vremu.vx on RV64.
1137 define <vscale x 8 x i64> @vremu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1138 ; RV32-LABEL: vremu_vx_nxv8i64_unmasked:
1140 ; RV32-NEXT: addi sp, sp, -16
1141 ; RV32-NEXT: .cfi_def_cfa_offset 16
1142 ; RV32-NEXT: sw a1, 12(sp)
1143 ; RV32-NEXT: sw a0, 8(sp)
1144 ; RV32-NEXT: addi a0, sp, 8
1145 ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
1146 ; RV32-NEXT: vlse64.v v16, (a0), zero
1147 ; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
1148 ; RV32-NEXT: vremu.vv v8, v8, v16
1149 ; RV32-NEXT: addi sp, sp, 16
1152 ; RV64-LABEL: vremu_vx_nxv8i64_unmasked:
1154 ; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
1155 ; RV64-NEXT: vremu.vx v8, v8, a0
1157 %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1158 %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1159 %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1160 ret <vscale x 8 x i64> %v