; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector move intrinsic instructions
;;;
;;; Note:
;;;   We test VMVivl, VMVivl_v, and VMVivml_v instructions.

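; vmv.vsvl: scalar operand passed in a register; the operand is truncated to
; 32 bits and the load, move, and store all run at vector length 256.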
; Function Attrs: nounwind
define void @vmv_vsvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    vmv %v0, %s1, %v0
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %1, <256 x double> %3, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readonly
declare <256 x double> @llvm.ve.vl.vld.vssl(i64, ptr, i32)

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvl(i32, <256 x double>, i32)

; Function Attrs: nounwind writeonly
declare void @llvm.ve.vl.vst.vssl(<256 x double>, i64, ptr, i32)

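; vmv.vsvl with the scalar operand as the immediate 31, which folds directly
; into the vmv instruction.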
; Function Attrs: nounwind
define void @vmv_vsvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    vmv %v0, 31, %v0
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvl(i32 31, <256 x double> %2, i32 256)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}

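; vmv.vsvvl adds a pass-through vector operand and uses vector length 128 for
; the move, so the VL register is switched to 128 around the vmv and restored
; to 256 for the store.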
; Function Attrs: nounwind
define void @vmv_vsvvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lea %s3, 128
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vmv %v0, %s1, %v0
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 %1, <256 x double> %3, <256 x double> %3, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvvl(i32, <256 x double>, <256 x double>, i32)

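; Immediate form of vmv.vsvvl; the same VL switching is expected, with 31
; folded into the vmv.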
; Function Attrs: nounwind
define void @vmv_vsvvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vmv %v0, 31, %v0
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvvl(i32 31, <256 x double> %2, <256 x double> %2, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}

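; vmv.vsvmvl additionally takes a mask operand; even with an undef mask in the
; IR, the generated vmv names a mask register (%vm1).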
; Function Attrs: nounwind
define void @vmv_vsvmvl(ptr %0, i32 signext %1) {
; CHECK-LABEL: vmv_vsvmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    and %s1, %s1, (32)0
; CHECK-NEXT:    lea %s3, 128
; CHECK-NEXT:    lvl %s3
; CHECK-NEXT:    vmv %v0, %s1, %v0, %vm1
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %1, <256 x double> %3, <256 x i1> undef, <256 x double> %3, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %4, i64 8, ptr %0, i32 256)
  ret void
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32, <256 x double>, <256 x i1>, <256 x double>, i32)

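; Immediate form of the masked vmv.vsvmvl.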
; Function Attrs: nounwind
define void @vmv_vsvmvl_imm(ptr %0) {
; CHECK-LABEL: vmv_vsvmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vld %v0, 8, %s0
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vmv %v0, 31, %v0, %vm1
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vst %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %2 = tail call fast <256 x double> @llvm.ve.vl.vld.vssl(i64 8, ptr %0, i32 256)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 31, <256 x double> %2, <256 x i1> undef, <256 x double> %2, i32 128)
  tail call void @llvm.ve.vl.vst.vssl(<256 x double> %3, i64 8, ptr %0, i32 256)
  ret void
}