1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
4 declare i8 @llvm.fshl.i8(i8, i8, i8)
5 declare i16 @llvm.fshl.i16(i16, i16, i16)
6 declare i32 @llvm.fshl.i32(i32, i32, i32)
7 declare i64 @llvm.fshl.i64(i64, i64, i64)
8 declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
10 declare i8 @llvm.fshr.i8(i8, i8, i8)
11 declare i16 @llvm.fshr.i16(i16, i16, i16)
12 declare i32 @llvm.fshr.i32(i32, i32, i32)
13 declare i64 @llvm.fshr.i64(i64, i64, i64)
14 declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
16 ; When first 2 operands match, it's a rotate.
; fshl with matching first two operands is a rotate-left; i8 has no native
; rotate, so it lowers to an extract/insert (ubfx/bfi) sequence.
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #5, #3
; CHECK-NEXT:    bfi w8, w0, #3, #29
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}
; i64 rotate-left by constant lowers to a single ror (right-rotate by 64-3).
define i64 @rotl_i64_const_shift(i64 %x) {
; CHECK-LABEL: rotl_i64_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror x0, x0, #61
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 3)
  ret i64 %f
}
38 ; When first 2 operands match, it's a rotate (by variable amount).
; i16 has no native rotate, so a variable rotate-left expands to
; masked shift-left / shift-right of the zero-extended value, then or.
define i16 @rotl_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotl_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w10, w1
; CHECK-NEXT:    and w8, w0, #0xffff
; CHECK-NEXT:    and w9, w1, #0xf
; CHECK-NEXT:    and w10, w10, #0xf
; CHECK-NEXT:    lsl w9, w0, w9
; CHECK-NEXT:    lsr w8, w8, w10
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    ret
  %f = call i16 @llvm.fshl.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}
; Variable rotate-left becomes ror with a negated amount.
define i32 @rotl_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotl_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg w8, w1
; CHECK-NEXT:    ror w0, w0, w8
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}
; Same as rotl_i32, on 64-bit registers.
define i64 @rotl_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotl_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg x8, x1
; CHECK-NEXT:    ror x0, x0, x8
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}
; Vector variable rotate: no NEON rotate instruction, so the amounts are
; masked to 0-31 and the rotate is built from two ushl (left and
; negated-right) plus an orr.
define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotl_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.4s, #31
; CHECK-NEXT:    neg v3.4s, v1.4s
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
; CHECK-NEXT:    neg v2.4s, v2.4s
; CHECK-NEXT:    ushl v1.4s, v0.4s, v1.4s
; CHECK-NEXT:    ushl v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    orr v0.16b, v1.16b, v0.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}
93 ; Vector rotate by constant splat amount.
define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_rotl_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4s, v0.4s, #29
; CHECK-NEXT:    shl v0.4s, v0.4s, #3
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}
106 ; Repeat everything for funnel shift right.
108 ; When first 2 operands match, it's a rotate.
; i8 rotate-right by constant: same ubfx/bfi expansion as the rotl case,
; with the field positions swapped.
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ubfx w8, w0, #3, #5
; CHECK-NEXT:    bfi w8, w0, #5, #27
; CHECK-NEXT:    mov w0, w8
; CHECK-NEXT:    ret
  %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
  ret i8 %f
}
; i32 rotate-right by constant maps directly onto ror.
define i32 @rotr_i32_const_shift(i32 %x) {
; CHECK-LABEL: rotr_i32_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror w0, w0, #3
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)
  ret i32 %f
}
130 ; When first 2 operands match, it's a rotate (by variable amount).
; i16 variable rotate-right: expanded shift/or sequence, mirror of rotl_i16.
define i16 @rotr_i16(i16 %x, i16 %z) {
; CHECK-LABEL: rotr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w8, w0, #0xffff
; CHECK-NEXT:    and w9, w1, #0xf
; CHECK-NEXT:    neg w10, w1
; CHECK-NEXT:    lsr w8, w8, w9
; CHECK-NEXT:    and w9, w10, #0xf
; CHECK-NEXT:    lsl w9, w0, w9
; CHECK-NEXT:    orr w0, w9, w8
; CHECK-NEXT:    ret
  %f = call i16 @llvm.fshr.i16(i16 %x, i16 %x, i16 %z)
  ret i16 %f
}
; Variable rotate-right is a single ror — no amount negation needed.
define i32 @rotr_i32(i32 %x, i32 %z) {
; CHECK-LABEL: rotr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror w0, w0, w1
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %z)
  ret i32 %f
}
; Same as rotr_i32, on 64-bit registers.
define i64 @rotr_i64(i64 %x, i64 %z) {
; CHECK-LABEL: rotr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ror x0, x0, x1
; CHECK-NEXT:    ret
  %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
  ret i64 %f
}
; Vector variable rotate-right: mirror of rotl_v4i32 (mask amounts, two
; ushl shifts, orr).
define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) {
; CHECK-LABEL: rotr_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v2.4s, #31
; CHECK-NEXT:    neg v3.4s, v1.4s
; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
; CHECK-NEXT:    neg v1.4s, v1.4s
; CHECK-NEXT:    ushl v1.4s, v0.4s, v1.4s
; CHECK-NEXT:    ushl v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> %z)
  ret <4 x i32> %f
}
183 ; Vector rotate by constant splat amount.
define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_const_shift:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushr v1.4s, v0.4s, #3
; CHECK-NEXT:    shl v0.4s, v0.4s, #29
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 3, i32 3, i32 3>)
  ret <4 x i32> %f
}
; Rotate by the full bit width: the amount is taken modulo 32, so this is
; the identity and should fold to a bare ret.
define i32 @rotl_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotl_i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}
; Rotate-right by the full bit width is also the identity.
define i32 @rotr_i32_shift_by_bitwidth(i32 %x) {
; CHECK-LABEL: rotr_i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 32)
  ret i32 %f
}
; Vector rotate by the full element width: identity, folds to a bare ret.
define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}
; Vector rotate-right by the full element width: identity, folds to a bare ret.
define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) {
; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
  ret <4 x i32> %f
}