; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
; The @rev* tests below implement the parallel bit-swap idiom and should fold
; to a single call to the llvm.bitreverse intrinsic:
;
; s = bitwidth; mask = ~0;
; while ((s >>= 1) > 0) {
;   mask ^= (mask << s);
;   v = ((v >> s) & mask) | ((v << s) & ~mask);
; }
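;
; For i8 this is three rounds (illustrative trace, not part of the test input):
;   s = 4: v = (v >> 4) | (v << 4)                    // swap nibbles
;   s = 2: v = ((v >> 2) & 0x33) | ((v << 2) & ~0x33) // swap bit pairs
;   s = 1: v = ((v >> 1) & 0x55) | ((v << 1) & ~0x55) // swap adjacent bits
; which matches the instruction sequence in @rev8 below (0x33 = 51, 0x55 = 85).
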
define i8 @rev8(i8 %v) {
; CHECK-LABEL: @rev8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[OR_2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[V:%.*]])
; CHECK-NEXT:    ret i8 [[OR_2]]
;
entry:
  %shr4 = lshr i8 %v, 4
  %shl7 = shl i8 %v, 4
  %or = or i8 %shr4, %shl7
  %shr4.1 = lshr i8 %or, 2
  %and.1 = and i8 %shr4.1, 51
  %shl7.1 = shl i8 %or, 2
  %and9.1 = and i8 %shl7.1, -52
  %or.1 = or i8 %and.1, %and9.1
  %shr4.2 = lshr i8 %or.1, 1
  %and.2 = and i8 %shr4.2, 85
  %shl7.2 = shl i8 %or.1, 1
  %and9.2 = and i8 %shl7.2, -86
  %or.2 = or i8 %and.2, %and9.2
  ret i8 %or.2
}

define i16 @rev16(i16 %v) {
; CHECK-LABEL: @rev16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[OR_3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[V:%.*]])
; CHECK-NEXT:    ret i16 [[OR_3]]
;
entry:
  %shr4 = lshr i16 %v, 8
  %shl7 = shl i16 %v, 8
  %or = or i16 %shr4, %shl7
  %shr4.1 = lshr i16 %or, 4
  %and.1 = and i16 %shr4.1, 3855
  %shl7.1 = shl i16 %or, 4
  %and9.1 = and i16 %shl7.1, -3856
  %or.1 = or i16 %and.1, %and9.1
  %shr4.2 = lshr i16 %or.1, 2
  %and.2 = and i16 %shr4.2, 13107
  %shl7.2 = shl i16 %or.1, 2
  %and9.2 = and i16 %shl7.2, -13108
  %or.2 = or i16 %and.2, %and9.2
  %shr4.3 = lshr i16 %or.2, 1
  %and.3 = and i16 %shr4.3, 21845
  %shl7.3 = shl i16 %or.2, 1
  %and9.3 = and i16 %shl7.3, -21846
  %or.3 = or i16 %and.3, %and9.3
  ret i16 %or.3
}

define i32 @rev32(i32 %v) {
; CHECK-LABEL: @rev32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[OR_4:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[V:%.*]])
; CHECK-NEXT:    ret i32 [[OR_4]]
;
entry:
  %shr1 = lshr i32 %v, 16
  %shl2 = shl i32 %v, 16
  %or = or i32 %shr1, %shl2
  %shr1.1 = lshr i32 %or, 8
  %and.1 = and i32 %shr1.1, 16711935
  %shl2.1 = shl i32 %or, 8
  %and3.1 = and i32 %shl2.1, -16711936
  %or.1 = or i32 %and.1, %and3.1
  %shr1.2 = lshr i32 %or.1, 4
  %and.2 = and i32 %shr1.2, 252645135
  %shl2.2 = shl i32 %or.1, 4
  %and3.2 = and i32 %shl2.2, -252645136
  %or.2 = or i32 %and.2, %and3.2
  %shr1.3 = lshr i32 %or.2, 2
  %and.3 = and i32 %shr1.3, 858993459
  %shl2.3 = shl i32 %or.2, 2
  %and3.3 = and i32 %shl2.3, -858993460
  %or.3 = or i32 %and.3, %and3.3
  %shr1.4 = lshr i32 %or.3, 1
  %and.4 = and i32 %shr1.4, 1431655765
  %shl2.4 = shl i32 %or.3, 1
  %and3.4 = and i32 %shl2.4, -1431655766
  %or.4 = or i32 %and.4, %and3.4
  ret i32 %or.4
}

define i64 @rev64(i64 %v) {
; CHECK-LABEL: @rev64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[OR_5:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[V:%.*]])
; CHECK-NEXT:    ret i64 [[OR_5]]
;
entry:
  %shr2 = lshr i64 %v, 32
  %shl4 = shl i64 %v, 32
  %or = or i64 %shr2, %shl4
  %shr2.1 = lshr i64 %or, 16
  %and.1 = and i64 %shr2.1, 281470681808895
  %shl4.1 = shl i64 %or, 16
  %and5.1 = and i64 %shl4.1, -281470681808896
  %or.1 = or i64 %and.1, %and5.1
  %shr2.2 = lshr i64 %or.1, 8
  %and.2 = and i64 %shr2.2, 71777214294589695
  %shl4.2 = shl i64 %or.1, 8
  %and5.2 = and i64 %shl4.2, -71777214294589696
  %or.2 = or i64 %and.2, %and5.2
  %shr2.3 = lshr i64 %or.2, 4
  %and.3 = and i64 %shr2.3, 1085102592571150095
  %shl4.3 = shl i64 %or.2, 4
  %and5.3 = and i64 %shl4.3, -1085102592571150096
  %or.3 = or i64 %and.3, %and5.3
  %shr2.4 = lshr i64 %or.3, 2
  %and.4 = and i64 %shr2.4, 3689348814741910323
  %shl4.4 = shl i64 %or.3, 2
  %and5.4 = and i64 %shl4.4, -3689348814741910324
  %or.4 = or i64 %and.4, %and5.4
  %shr2.5 = lshr i64 %or.4, 1
  %and.5 = and i64 %shr2.5, 6148914691236517205
  %shl4.5 = shl i64 %or.4, 1
  %and5.5 = and i64 %shl4.5, -6148914691236517206
  %or.5 = or i64 %and.5, %and5.5
  ret i64 %or.5
}

; unsigned char rev8_xor(unsigned char x) {
;   unsigned char y;
;   y = x&0x55; x ^= y; x |= (y<<2)|(y>>6);
;   y = x&0x66; x ^= y; x |= (y<<4)|(y>>4);
;   return (x<<1)|(x>>7);
; }
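;
; Illustrative trace (not part of the test input): rev8_xor(0x01) = 0x80
;   y = 0x01&0x55 = 0x01;  x ^= y -> 0x00;  x |= (y<<2)|(y>>6) -> 0x04
;   y = 0x04&0x66 = 0x04;  x ^= y -> 0x00;  x |= (y<<4)|(y>>4) -> 0x40
;   return (0x40<<1)|(0x40>>7) = 0x80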
define i8 @rev8_xor(i8 %0) {
; CHECK-LABEL: @rev8_xor(
; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[TMP0:%.*]])
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %2 = and i8 %0, 85
  %3 = xor i8 %0, %2
  %4 = shl i8 %2, 2
  %5 = lshr i8 %2, 6
  %6 = or i8 %5, %3
  %7 = or i8 %6, %4
  %8 = and i8 %7, 102
  %9 = xor i8 %7, %8
  %10 = lshr i8 %8, 4
  %11 = or i8 %10, %9
  %12 = shl i8 %8, 5
  %13 = shl i8 %11, 1
  %14 = or i8 %12, %13
  %15 = lshr i8 %0, 7
  %16 = or i8 %14, %15
  ret i8 %16
}

define <2 x i8> @rev8_xor_vector(<2 x i8> %0) {
; CHECK-LABEL: @rev8_xor_vector(
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i8> @llvm.bitreverse.v2i8(<2 x i8> [[TMP0:%.*]])
; CHECK-NEXT:    ret <2 x i8> [[TMP2]]
;
  %2 = and <2 x i8> %0, <i8 85, i8 85>
  %3 = xor <2 x i8> %0, %2
  %4 = shl <2 x i8> %2, <i8 2, i8 2>
  %5 = lshr <2 x i8> %2, <i8 6, i8 6>
  %6 = or <2 x i8> %5, %3
  %7 = or <2 x i8> %6, %4
  %8 = and <2 x i8> %7, <i8 102, i8 102>
  %9 = xor <2 x i8> %7, %8
  %10 = lshr <2 x i8> %8, <i8 4, i8 4>
  %11 = or <2 x i8> %10, %9
  %12 = shl <2 x i8> %8, <i8 5, i8 5>
  %13 = shl <2 x i8> %11, <i8 1, i8 1>
  %14 = or <2 x i8> %12, %13
  %15 = lshr <2 x i8> %0, <i8 7, i8 7>
  %16 = or <2 x i8> %14, %15
  ret <2 x i8> %16
}

; bitreverse8(x) = ((x * 0x0202020202ULL) & 0x010884422010ULL) % 1023
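; The multiply spreads five shifted copies of x through a 64-bit value, the
; mask keeps each source bit exactly once, and the urem by 2^10-1 folds the
; selected bits into reversed order. Illustrative trace (not test input):
;   bitreverse8(0x01): 0x01 * 0x0202020202 = 0x0202020202
;                      0x0202020202 & 0x010884422010 = 0x20000 (bit 17)
;                      0x20000 % 1023 = 128 = 0x80 (since 2^10 mod 1023 = 1)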
define i8 @rev8_mul_and_urem(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_urem(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 8623620610
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 1136090292240
; CHECK-NEXT:    [[TMP5:%.*]] = urem i64 [[TMP4]], 1023
; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP5]] to i8
; CHECK-NEXT:    ret i8 [[TMP6]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 8623620610
  %4 = and i64 %3, 1136090292240
  %5 = urem i64 %4, 1023
  %6 = trunc i64 %5 to i8
  ret i8 %6
}

; bitreverse8(x) = ((x * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32
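; Same idea, but the second multiply sums the masked copies into the result
; instead of a modulo. Illustrative trace (not test input):
;   bitreverse8(0x01): 0x01 * 0x80200802 = 0x80200802
;                      0x80200802 & 0x0884422110 = 0x80000000 (bit 31)
;                      0x80000000 * 0x0101010101 >> 32 = 0x80808080; low byte 0x80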
define i8 @rev8_mul_and_mul(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_mul(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 2149582850
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 36578664720
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4311810305
; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 32
; CHECK-NEXT:    [[TMP7:%.*]] = trunc i64 [[TMP6]] to i8
; CHECK-NEXT:    ret i8 [[TMP7]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 2149582850
  %4 = and i64 %3, 36578664720
  %5 = mul i64 %4, 4311810305
  %6 = lshr i64 %5, 32
  %7 = trunc i64 %6 to i8
  ret i8 %7
}

; bitreverse8(x) = (((x * 0x0802LU) & 0x22110LU) | ((x * 0x8020LU) & 0x88440LU)) * 0x10101LU >> 16
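; Each mul/and pair gathers four of the eight source bits; the or merges them
; and the 0x10101 multiply sums the reversed byte into bits 16..23.
; Illustrative trace (not test input):
;   bitreverse8(0x01): (0x01 * 0x0802) & 0x22110 = 0
;                      (0x01 * 0x8020) & 0x88440 = 0x8000
;                      (0 | 0x8000) * 0x10101 >> 16 = 0x8080; low byte 0x80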
define i8 @rev8_mul_and_lshr(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_lshr(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 2050
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 139536
; CHECK-NEXT:    [[TMP5:%.*]] = mul nuw nsw i64 [[TMP2]], 32800
; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], 558144
; CHECK-NEXT:    [[TMP7:%.*]] = or i64 [[TMP4]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul nuw nsw i64 [[TMP7]], 65793
; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP8]], 16
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
; CHECK-NEXT:    ret i8 [[TMP10]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 2050
  %4 = and i64 %3, 139536
  %5 = mul nuw nsw i64 %2, 32800
  %6 = and i64 %5, 558144
  %7 = or i64 %4, %6
  %8 = mul nuw nsw i64 %7, 65793
  %9 = lshr i64 %8, 16
  %10 = trunc i64 %9 to i8
  ret i8 %10
}