; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
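
; vmv.x.s copies element 0 of the source vector register to an integer scalar
; register, sign-extending the value when SEW < XLEN (hence the signext return
; attribute on the sub-XLEN cases below). Since only element 0 is read, the
; expected code sets VL=1 and can stay at LMUL=1 regardless of how large the
; source register group is.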
declare i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv1i8(<vscale x 1 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv2i8(<vscale x 2 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv4i8(<vscale x 4 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv8i8(<vscale x 8 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv16i8(<vscale x 16 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv32i8(<vscale x 32 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> %0)
  ret i8 %a
}

declare i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8>)

define signext i8 @intrinsic_vmv.x.s_s_nxv64i8(<vscale x 64 x i8> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> %0)
  ret i8 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv1i16(<vscale x 1 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv2i16(<vscale x 2 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv4i16(<vscale x 4 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv8i16(<vscale x 8 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv16i16(<vscale x 16 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> %0)
  ret i16 %a
}

declare i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16>)

define signext i16 @intrinsic_vmv.x.s_s_nxv32i16(<vscale x 32 x i16> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> %0)
  ret i16 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv1i32(<vscale x 1 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv2i32(<vscale x 2 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv4i32(<vscale x 4 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv8i32(<vscale x 8 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> %0)
  ret i32 %a
}

declare i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32>)

define signext i32 @intrinsic_vmv.x.s_s_nxv16i32(<vscale x 16 x i32> %0) nounwind {
; CHECK-LABEL: intrinsic_vmv.x.s_s_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vmv.x.s a0, v8
; CHECK-NEXT:    ret
entry:
  %a = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> %0)
  ret i32 %a
}

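; On RV32, an i64 result does not fit in a single GPR, so the lowering below
; returns it split across a0 (low half) and a1 (high half): a vsrl.vx by 32
; extracts the high word before each half is moved out with vmv.x.s. Because
; the shift operates on the whole register group, these cases use the source
; type's native LMUL (m1..m8) instead of m1.
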
declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv1i64(<vscale x 1 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vx v9, v8, a0
; RV32-NEXT:    vmv.x.s a1, v9
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv2i64(<vscale x 2 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT:    vsrl.vx v10, v8, a0
; RV32-NEXT:    vmv.x.s a1, v10
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv2i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv4i64(<vscale x 4 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
; RV32-NEXT:    vsrl.vx v12, v8, a0
; RV32-NEXT:    vmv.x.s a1, v12
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv4i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> %0)
  ret i64 %a
}

declare i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64>)

define i64 @intrinsic_vmv.x.s_s_nxv8i64(<vscale x 8 x i64> %0) nounwind {
; RV32-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    li a0, 32
; RV32-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vx v16, v8, a0
; RV32-NEXT:    vmv.x.s a1, v16
; RV32-NEXT:    vmv.x.s a0, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmv.x.s_s_nxv8i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vmv.x.s a0, v8
; RV64-NEXT:    ret
entry:
  %a = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> %0)
  ret i64 %a
}