; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s

;;; Test vector shift left and add intrinsic instructions
;;;
;;; Note:
;;;   We test VSFA*vrrl, VSFA*vrrl_v, VSFA*virl, VSFA*virl_v, VSFA*vrrml_v, and
;;;   VSFA*virml_v instructions.
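;;;
;;; Rough per-element semantics, as a sketch (the VE ISA manual is the
;;; authoritative reference; only the low bits of sy are used as the shift
;;; amount): vsfa %vx, %vz, sy, sz computes
;;;   vx[i] = (vz[i] << sy) + sz
;;; i.e. shift-left-and-add, typically used to scale element indices into
;;; byte offsets. The trailing i32 argument of each intrinsic is the active
;;; vector length, which the generated code materializes via lea/lvl.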

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssl(<256 x double> %0, i64 %1, i64 %2) {
; CHECK-LABEL: vsfa_vvssl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 256
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v0, %v0, %s0, %s1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 %1, i64 %2, i32 256)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double>, i64, i64, i32)

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3) {
; CHECK-LABEL: vsfa_vvssvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 %1, i64 %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double>, i64, i64, <256 x double>, i32)
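
; In the _v (pass-through) form above, vsfa runs at the requested VL of 128
; into %v1, which holds the pass-through operand, and vor then copies the
; result to %v0 at full VL 256, so elements past the requested length keep
; their pass-through values (a reading of the generated checks above).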

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssl_imm(<256 x double> %0, i64 %1) {
; CHECK-LABEL: vsfa_vvssl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 256
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v0, %v0, 8, %s0
; CHECK-NEXT:    b.l.t (, %s10)
  %3 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %0, i64 8, i64 %1, i32 256)
  ret <256 x double> %3
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssvl_imm(<256 x double> %0, i64 %1, <256 x double> %2) {
; CHECK-LABEL: vsfa_vvssvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %4 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssvl(<256 x double> %0, i64 8, i64 %1, <256 x double> %2, i32 128)
  ret <256 x double> %4
}

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4) {
; CHECK-LABEL: vsfa_vvssmvl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s2, 128
; CHECK-NEXT:    lvl %s2
; CHECK-NEXT:    vsfa %v1, %v0, %s0, %s1, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %6 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 %1, i64 %2, <256 x i1> %3, <256 x double> %4, i32 128)
  ret <256 x double> %6
}

; Function Attrs: nounwind readnone
declare <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double>, i64, i64, <256 x i1>, <256 x double>, i32)
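
; In the masked (m) variants the <256 x i1> operand becomes %vm1 on vsfa, so
; only the selected elements are computed; unselected elements are taken from
; the pass-through vector, following the same copy-back pattern as above.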

; Function Attrs: nounwind readnone
define fastcc <256 x double> @vsfa_vvssmvl_imm(<256 x double> %0, i64 %1, <256 x i1> %2, <256 x double> %3) {
; CHECK-LABEL: vsfa_vvssmvl_imm:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lea %s1, 128
; CHECK-NEXT:    lvl %s1
; CHECK-NEXT:    vsfa %v1, %v0, 8, %s0, %vm1
; CHECK-NEXT:    lea %s16, 256
; CHECK-NEXT:    lvl %s16
; CHECK-NEXT:    vor %v0, (0)1, %v1
; CHECK-NEXT:    b.l.t (, %s10)
  %5 = tail call fast <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %0, i64 8, i64 %1, <256 x i1> %2, <256 x double> %3, i32 128)
  ret <256 x double> %5
}