; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Test vector rotate left instructions with vector rotate amount.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)
declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
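
; A vector rotate left by a per-element amount can be written either as
; (shl %val, %amt) | (lshr %val, bits - %amt) or as a call to @llvm.fshl with
; both value operands equal to %val; both forms are expected to be matched to
; the element-wise rotate instructions verllvb/verllvh/verllvf/verllvg.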

; Test a v16i8 rotate left.
define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val, <16 x i8> %amt) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvb %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %inv = sub <16 x i8> <i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8,
                        i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8>, %amt
  %parta = shl <16 x i8> %val, %amt
  %partb = lshr <16 x i8> %val, %inv
  %rotl = or <16 x i8> %parta, %partb
  ret <16 x i8> %rotl
}

; Test a v16i8 rotate left (matched from fshl).
define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val, <16 x i8> %amt) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvb %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %rotl = tail call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %val, <16 x i8> %val, <16 x i8> %amt)
  ret <16 x i8> %rotl
}

; Test a v8i16 rotate left.
define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val, <8 x i16> %amt) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvh %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %inv = sub <8 x i16> <i16 16, i16 16, i16 16, i16 16,
                        i16 16, i16 16, i16 16, i16 16>, %amt
  %parta = shl <8 x i16> %val, %amt
  %partb = lshr <8 x i16> %val, %inv
  %rotl = or <8 x i16> %parta, %partb
  ret <8 x i16> %rotl
}

; Test a v8i16 rotate left (matched from fshl).
define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val, <8 x i16> %amt) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvh %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %rotl = tail call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %val, <8 x i16> %val, <8 x i16> %amt)
  ret <8 x i16> %rotl
}

; Test a v4i32 rotate left.
define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val, <4 x i32> %amt) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvf %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %inv = sub <4 x i32> <i32 32, i32 32, i32 32, i32 32>, %amt
  %parta = shl <4 x i32> %val, %amt
  %partb = lshr <4 x i32> %val, %inv
  %rotl = or <4 x i32> %parta, %partb
  ret <4 x i32> %rotl
}

; Test a v4i32 rotate left (matched from fshl).
define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val, <4 x i32> %amt) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvf %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %rotl = tail call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %val, <4 x i32> %val, <4 x i32> %amt)
  ret <4 x i32> %rotl
}

; Test a v2i64 rotate left.
define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val, <2 x i64> %amt) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvg %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %inv = sub <2 x i64> <i64 64, i64 64>, %amt
  %parta = shl <2 x i64> %val, %amt
  %partb = lshr <2 x i64> %val, %inv
  %rotl = or <2 x i64> %parta, %partb
  ret <2 x i64> %rotl
}

; Test a v2i64 rotate left (matched from fshl).
define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val, <2 x i64> %amt) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    verllvg %v24, %v26, %v28
; CHECK-NEXT:    br %r14
  %rotl = tail call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %val, <2 x i64> %val, <2 x i64> %amt)
  ret <2 x i64> %rotl
}