; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
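
; Tests for the llvm.riscv.vmv.s.x intrinsic, which copies a scalar GPR value
; into element 0 of the destination vector while leaving the other elements
; undisturbed, covering every supported integer element width on riscv32.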

declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, i32)

define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, i32)

define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, i32)

define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, i32)

define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 8 x i8> %a
}
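
; Note that for types that would naturally occupy more than one register
; (LMUL > 1), the expected vsetvli still uses m1: vmv.s.x writes only
; element 0, so a single-register group appears to be sufficient.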

declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, i32)

define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, i32)

define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, i32)

define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2)
  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, i32)

define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, i32)

define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, i32)

define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, i32)

define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, i32)

define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, i32)

define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2)
  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, i32)

define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, i32)

define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2)
  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, i32)

define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2)
  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, i32)

define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2)
  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, i32)

define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2)
  ret <vscale x 16 x i32> %a
}
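
; i64 cases: riscv32 has no 64-bit GPRs, so the scalar is passed split across
; a0/a1. The expected lowering stores both halves to a stack slot and reloads
; them into element 0 with a zero-stride vlse64.v, masked via vid.v/vmseq.vi
; so that only element 0 is written.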

declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, i32)

define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vmseq.vi v0, v9, 0
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %0, i64 %1, i32 %2)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64, i32)

define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vmseq.vi v0, v10, 0
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64> %0, i64 %1, i32 %2)
  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64, i32)

define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vmseq.vi v0, v12, 0
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64> %0, i64 %1, i32 %2)
  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64, i32)

define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vmseq.vi v0, v16, 0
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v8, (a0), zero, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> %0, i64 %1, i32 %2)
  ret <vscale x 8 x i64> %a
}