; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8>, i8, iXLen);

define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8>, i8, iXLen);

define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8>, i8, iXLen);

define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8>, i8, iXLen);

define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8>, i8, iXLen);

define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8>, i8, iXLen);

define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 32 x i8> %a
}
declare <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8>, i8, iXLen);

define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2)
  ret <vscale x 64 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16>, i16, iXLen);

define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16>, i16, iXLen);

define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16>, i16, iXLen);

define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16>, i16, iXLen);

define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16>, i16, iXLen);

define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16>, i16, iXLen);

define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2)
  ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32>, i32, iXLen);

define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32>, i32, iXLen);

define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2)
  ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32>, i32, iXLen);

define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2)
  ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32>, i32, iXLen);

define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2)
  ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32>, i32, iXLen);

define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2)
  ret <vscale x 16 x i32> %a
}
; On RV32 the i64 scalar arrives split across two GPRs, so it is spilled to the
; stack and reloaded into element 0 with a masked zero-strided vlse64; on RV64
; it can be inserted directly with vmv.s.x.
declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, iXLen);

define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vid.v v9
; RV32-NEXT:    vmseq.vi v0, v9, 0
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64>, i64, iXLen);

define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vid.v v10
; RV32-NEXT:    vmseq.vi v0, v10, 0
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64>, i64, iXLen);

define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vid.v v12
; RV32-NEXT:    vmseq.vi v0, v12, 0
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64>, i64, iXLen);

define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vid.v v16
; RV32-NEXT:    vmseq.vi v0, v16, 0
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2)
  ret <vscale x 8 x i64> %a
}
; We should not emit a tail-agnostic vlse for a tail-undisturbed vmv.s.x.
define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64_bug(<vscale x 1 x i64> %0, ptr %1) nounwind {
; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64_bug:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    lw a1, 0(a0)
; RV32-NEXT:    lw a0, 4(a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
; RV32-NEXT:    vid.v v9
; RV32-NEXT:    vmseq.vi v0, v9, 0
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    sw a0, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vlse64.v v8, (a0), zero, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64_bug:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    ld a0, 0(a0)
; RV64-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a0
; RV64-NEXT:    ret
entry:
  %a = load i64, ptr %1, align 8
  %b = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %0, i64 %a, iXLen 1)
  ret <vscale x 1 x i64> %b
}