; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"

declare i16 @llvm.bitreverse.i16(i16)
declare i32 @llvm.bitreverse.i32(i32)
declare i64 @llvm.bitreverse.i64(i64)
declare <2 x i8> @llvm.bitreverse.v2i8(<2 x i8>)
declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)
declare void @use_i32(i32)
declare void @use_i64(i64)

; The shift-and-mask tests below expand this bit-reversal template; each should
; fold to a single @llvm.bitreverse call:
;
;template <typename T>
;T rev(T v) {
; T s = sizeof(v) * 8;
; T mask = ~(T)0;
; while ((s >>= 1) > 0) {
;   mask ^= (mask << s);
;   v = ((v >> s) & mask) | ((v << s) & ~mask);
; }
; return v;
;}
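;
; For reference (not part of the original test): with T = i8 the loop runs for
; s = 4, 2, 1 and the mask takes the values 0x0F, 0x33, 0x55. That is where the
; constants in @rev8 below come from: 51 = 0x33, -52 = 0xCC, 85 = 0x55 and
; -86 = 0xAA; the first s = 4 step needs no masking because an i8 shift by 4
; already discards the other half of the byte.
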
define i8 @rev8(i8 %v) {
; CHECK-LABEL: @rev8(
; CHECK-NEXT:    [[OR_2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[V:%.*]])
; CHECK-NEXT:    ret i8 [[OR_2]]
;
  %shr4 = lshr i8 %v, 4
  %shl7 = shl i8 %v, 4
  %or = or i8 %shr4, %shl7
  %shr4.1 = lshr i8 %or, 2
  %and.1 = and i8 %shr4.1, 51
  %shl7.1 = shl i8 %or, 2
  %and9.1 = and i8 %shl7.1, -52
  %or.1 = or i8 %and.1, %and9.1
  %shr4.2 = lshr i8 %or.1, 1
  %and.2 = and i8 %shr4.2, 85
  %shl7.2 = shl i8 %or.1, 1
  %and9.2 = and i8 %shl7.2, -86
  %or.2 = or i8 %and.2, %and9.2
  ret i8 %or.2
}

define i16 @rev16(i16 %v) {
; CHECK-LABEL: @rev16(
; CHECK-NEXT:    [[OR_3:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[V:%.*]])
; CHECK-NEXT:    ret i16 [[OR_3]]
;
  %shr4 = lshr i16 %v, 8
  %shl7 = shl i16 %v, 8
  %or = or i16 %shr4, %shl7
  %shr4.1 = lshr i16 %or, 4
  %and.1 = and i16 %shr4.1, 3855
  %shl7.1 = shl i16 %or, 4
  %and9.1 = and i16 %shl7.1, -3856
  %or.1 = or i16 %and.1, %and9.1
  %shr4.2 = lshr i16 %or.1, 2
  %and.2 = and i16 %shr4.2, 13107
  %shl7.2 = shl i16 %or.1, 2
  %and9.2 = and i16 %shl7.2, -13108
  %or.2 = or i16 %and.2, %and9.2
  %shr4.3 = lshr i16 %or.2, 1
  %and.3 = and i16 %shr4.3, 21845
  %shl7.3 = shl i16 %or.2, 1
  %and9.3 = and i16 %shl7.3, -21846
  %or.3 = or i16 %and.3, %and9.3
  ret i16 %or.3
}

define i32 @rev32(i32 %v) {
; CHECK-LABEL: @rev32(
; CHECK-NEXT:    [[OR_4:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[V:%.*]])
; CHECK-NEXT:    ret i32 [[OR_4]]
;
  %shr1 = lshr i32 %v, 16
  %shl2 = shl i32 %v, 16
  %or = or i32 %shr1, %shl2
  %shr1.1 = lshr i32 %or, 8
  %and.1 = and i32 %shr1.1, 16711935
  %shl2.1 = shl i32 %or, 8
  %and3.1 = and i32 %shl2.1, -16711936
  %or.1 = or i32 %and.1, %and3.1
  %shr1.2 = lshr i32 %or.1, 4
  %and.2 = and i32 %shr1.2, 252645135
  %shl2.2 = shl i32 %or.1, 4
  %and3.2 = and i32 %shl2.2, -252645136
  %or.2 = or i32 %and.2, %and3.2
  %shr1.3 = lshr i32 %or.2, 2
  %and.3 = and i32 %shr1.3, 858993459
  %shl2.3 = shl i32 %or.2, 2
  %and3.3 = and i32 %shl2.3, -858993460
  %or.3 = or i32 %and.3, %and3.3
  %shr1.4 = lshr i32 %or.3, 1
  %and.4 = and i32 %shr1.4, 1431655765
  %shl2.4 = shl i32 %or.3, 1
  %and3.4 = and i32 %shl2.4, -1431655766
  %or.4 = or i32 %and.4, %and3.4
  ret i32 %or.4
}

define i32 @rev32_bswap(i32 %v) {
; CHECK-LABEL: @rev32_bswap(
; CHECK-NEXT:    [[RET:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[V:%.*]])
; CHECK-NEXT:    ret i32 [[RET]]
;
  %and.i = lshr i32 %v, 1
  %shr.i = and i32 %and.i, 1431655765
  %and1.i = shl i32 %v, 1
  %shl.i = and i32 %and1.i, -1431655766
  %or.i = or disjoint i32 %shr.i, %shl.i
  %and2.i = lshr i32 %or.i, 2
  %shr3.i = and i32 %and2.i, 858993459
  %and4.i = shl i32 %or.i, 2
  %shl5.i = and i32 %and4.i, -858993460
  %or6.i = or disjoint i32 %shr3.i, %shl5.i
  %and7.i = lshr i32 %or6.i, 4
  %shr8.i = and i32 %and7.i, 252645135
  %and9.i = shl i32 %or6.i, 4
  %shl10.i = and i32 %and9.i, -252645136
  %or11.i = or disjoint i32 %shr8.i, %shl10.i
  %ret = call i32 @llvm.bswap.i32(i32 %or11.i)
  ret i32 %ret
}

define i64 @rev64(i64 %v) {
; CHECK-LABEL: @rev64(
; CHECK-NEXT:    [[OR_5:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[V:%.*]])
; CHECK-NEXT:    ret i64 [[OR_5]]
;
  %shr2 = lshr i64 %v, 32
  %shl4 = shl i64 %v, 32
  %or = or i64 %shr2, %shl4
  %shr2.1 = lshr i64 %or, 16
  %and.1 = and i64 %shr2.1, 281470681808895
  %shl4.1 = shl i64 %or, 16
  %and5.1 = and i64 %shl4.1, -281470681808896
  %or.1 = or i64 %and.1, %and5.1
  %shr2.2 = lshr i64 %or.1, 8
  %and.2 = and i64 %shr2.2, 71777214294589695
  %shl4.2 = shl i64 %or.1, 8
  %and5.2 = and i64 %shl4.2, -71777214294589696
  %or.2 = or i64 %and.2, %and5.2
  %shr2.3 = lshr i64 %or.2, 4
  %and.3 = and i64 %shr2.3, 1085102592571150095
  %shl4.3 = shl i64 %or.2, 4
  %and5.3 = and i64 %shl4.3, -1085102592571150096
  %or.3 = or i64 %and.3, %and5.3
  %shr2.4 = lshr i64 %or.3, 2
  %and.4 = and i64 %shr2.4, 3689348814741910323
  %shl4.4 = shl i64 %or.3, 2
  %and5.4 = and i64 %shl4.4, -3689348814741910324
  %or.4 = or i64 %and.4, %and5.4
  %shr2.5 = lshr i64 %or.4, 1
  %and.5 = and i64 %shr2.5, 6148914691236517205
  %shl4.5 = shl i64 %or.4, 1
  %and5.5 = and i64 %shl4.5, -6148914691236517206
  %or.5 = or i64 %and.5, %and5.5
  ret i64 %or.5
}

; bitreverse8(x) written with masked xors and rotates:
;
;unsigned char rev8_xor(unsigned char x) {
; unsigned char y;
; y = x&0x55; x ^= y; x |= (y<<2)|(y>>6);
; y = x&0x66; x ^= y; x |= (y<<4)|(y>>4);
; return (x<<1)|(x>>7);
;}
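;
; Worked example (not part of the original test): for x = 0x01 the first step
; produces 0x04 (bit 0 moves up 2), the second step produces 0x40 (it moves up
; 4 more), and the final rotate-left-by-1 gives 0x80, the bit-reversed input.
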
define i8 @rev8_xor(i8 %0) {
; CHECK-LABEL: @rev8_xor(
; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[TMP0:%.*]])
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %2 = and i8 %0, 85
  %3 = xor i8 %0, %2
  %4 = shl i8 %2, 2
  %5 = lshr i8 %2, 6
  %6 = or i8 %5, %3
  %7 = or i8 %6, %4
  %8 = and i8 %7, 102
  %9 = xor i8 %7, %8
  %10 = lshr i8 %8, 4
  %11 = or i8 %10, %9
  %12 = shl i8 %8, 5
  %13 = shl i8 %11, 1
  %14 = or i8 %12, %13
  %15 = lshr i8 %0, 7
  %16 = or i8 %14, %15
  ret i8 %16
}

define <2 x i8> @rev8_xor_vector(<2 x i8> %0) {
; CHECK-LABEL: @rev8_xor_vector(
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i8> @llvm.bitreverse.v2i8(<2 x i8> [[TMP0:%.*]])
; CHECK-NEXT:    ret <2 x i8> [[TMP2]]
;
  %2 = and <2 x i8> %0, <i8 85, i8 85>
  %3 = xor <2 x i8> %0, %2
  %4 = shl <2 x i8> %2, <i8 2, i8 2>
  %5 = lshr <2 x i8> %2, <i8 6, i8 6>
  %6 = or <2 x i8> %5, %3
  %7 = or <2 x i8> %6, %4
  %8 = and <2 x i8> %7, <i8 102, i8 102>
  %9 = xor <2 x i8> %7, %8
  %10 = lshr <2 x i8> %8, <i8 4, i8 4>
  %11 = or <2 x i8> %10, %9
  %12 = shl <2 x i8> %8, <i8 5, i8 5>
  %13 = shl <2 x i8> %11, <i8 1, i8 1>
  %14 = or <2 x i8> %12, %13
  %15 = lshr <2 x i8> %0, <i8 7, i8 7>
  %16 = or <2 x i8> %14, %15
  ret <2 x i8> %16
}

; bitreverse8(x) = ((x * 0x0202020202ULL) & 0x010884422010ULL) % 1023
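; (Explanatory note, not part of the original test: the multiply fans the byte
; out into five shifted copies, the AND keeps each source bit in exactly one
; 10-bit group at its bit-reversed offset within that group, and the remainder
; modulo 2^10-1 = 1023 merges the non-overlapping 10-bit groups. As the
; assertions below show, instcombine currently leaves this form alone.)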
define i8 @rev8_mul_and_urem(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_urem(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 8623620610
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 1136090292240
; CHECK-NEXT:    [[TMP5:%.*]] = urem i64 [[TMP4]], 1023
; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[TMP5]] to i8
; CHECK-NEXT:    ret i8 [[TMP6]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 8623620610
  %4 = and i64 %3, 1136090292240
  %5 = urem i64 %4, 1023
  %6 = trunc i64 %5 to i8
  ret i8 %6
}

; bitreverse8(x) = ((x * 0x80200802ULL) & 0x0884422110ULL) * 0x0101010101ULL >> 32
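; (Explanatory note, not part of the original test: the first multiply makes
; four shifted copies of the byte, the AND keeps each source bit exactly once,
; and the multiply by 0x0101010101 adds the value at byte offsets 0..4 so the
; reversed byte accumulates in bits 32..39, which the final shift extracts.
; This form is also left alone by instcombine, per the assertions below.)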
define i8 @rev8_mul_and_mul(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_mul(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 2149582850
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 36578664720
; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4311810305
; CHECK-NEXT:    [[TMP6:%.*]] = lshr i64 [[TMP5]], 32
; CHECK-NEXT:    [[TMP7:%.*]] = trunc i64 [[TMP6]] to i8
; CHECK-NEXT:    ret i8 [[TMP7]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 2149582850
  %4 = and i64 %3, 36578664720
  %5 = mul i64 %4, 4311810305
  %6 = lshr i64 %5, 32
  %7 = trunc i64 %6 to i8
  ret i8 %7
}

; bitreverse8(x) = (((x * 0x0802LU) & 0x22110LU) | ((x * 0x8020LU) & 0x88440LU)) * 0x10101LU >> 16
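; (Explanatory note, not part of the original test: this is the 32-bit-friendly
; variant - each multiply/mask pair places half of the byte's bits, the or
; merges the two disjoint results, and the multiply by 0x10101 accumulates the
; reversed byte into bits 16..23 before the shift and truncate extract it.)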
define i8 @rev8_mul_and_lshr(i8 %0) {
; CHECK-LABEL: @rev8_mul_and_lshr(
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP0:%.*]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i64 [[TMP2]], 2050
; CHECK-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], 139536
; CHECK-NEXT:    [[TMP5:%.*]] = mul nuw nsw i64 [[TMP2]], 32800
; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], 558144
; CHECK-NEXT:    [[TMP7:%.*]] = or disjoint i64 [[TMP4]], [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = mul nuw nsw i64 [[TMP7]], 65793
; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP8]], 16
; CHECK-NEXT:    [[TMP10:%.*]] = trunc i64 [[TMP9]] to i8
; CHECK-NEXT:    ret i8 [[TMP10]]
;
  %2 = zext i8 %0 to i64
  %3 = mul nuw nsw i64 %2, 2050
  %4 = and i64 %3, 139536
  %5 = mul nuw nsw i64 %2, 32800
  %6 = and i64 %5, 558144
  %7 = or i64 %4, %6
  %8 = mul nuw nsw i64 %7, 65793
  %9 = lshr i64 %8, 16
  %10 = trunc i64 %9 to i8
  ret i8 %10
}

define i4 @shuf_4bits(<4 x i1> %x) {
; CHECK-LABEL: @shuf_4bits(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i1> [[X:%.*]] to i4
; CHECK-NEXT:    [[CAST:%.*]] = call i4 @llvm.bitreverse.i4(i4 [[TMP1]])
; CHECK-NEXT:    ret i4 [[CAST]]
;
  %bitreverse = shufflevector <4 x i1> %x, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %cast = bitcast <4 x i1> %bitreverse to i4
  ret i4 %cast
}

define i4 @shuf_load_4bits(ptr %p) {
; CHECK-LABEL: @shuf_load_4bits(
; CHECK-NEXT:    [[X1:%.*]] = load i4, ptr [[P:%.*]], align 1
; CHECK-NEXT:    [[CAST:%.*]] = call i4 @llvm.bitreverse.i4(i4 [[X1]])
; CHECK-NEXT:    ret i4 [[CAST]]
;
  %x = load <4 x i1>, ptr %p
  %bitreverse = shufflevector <4 x i1> %x, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %cast = bitcast <4 x i1> %bitreverse to i4
  ret i4 %cast
}

define i4 @shuf_bitcast_twice_4bits(i4 %x) {
; CHECK-LABEL: @shuf_bitcast_twice_4bits(
; CHECK-NEXT:    [[CAST2:%.*]] = call i4 @llvm.bitreverse.i4(i4 [[X:%.*]])
; CHECK-NEXT:    ret i4 [[CAST2]]
;
  %cast1 = bitcast i4 %x to <4 x i1>
  %bitreverse = shufflevector <4 x i1> %cast1, <4 x i1> undef, <4 x i32> <i32 poison, i32 2, i32 1, i32 0>
  %cast2 = bitcast <4 x i1> %bitreverse to i4
  ret i4 %cast2
}

; Negative test - the shuffle mask is not a reverse

define i4 @shuf_4bits_not_reverse(<4 x i1> %x) {
; CHECK-LABEL: @shuf_4bits_not_reverse(
; CHECK-NEXT:    [[BITREVERSE:%.*]] = shufflevector <4 x i1> [[X:%.*]], <4 x i1> poison, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <4 x i1> [[BITREVERSE]] to i4
; CHECK-NEXT:    ret i4 [[CAST]]
;
  %bitreverse = shufflevector <4 x i1> %x, <4 x i1> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>
  %cast = bitcast <4 x i1> %bitreverse to i4
  ret i4 %cast
}

; Negative test - extra use

declare void @use(<4 x i1>)

define i4 @shuf_4bits_extra_use(<4 x i1> %x) {
; CHECK-LABEL: @shuf_4bits_extra_use(
; CHECK-NEXT:    [[BITREVERSE:%.*]] = shufflevector <4 x i1> [[X:%.*]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT:    call void @use(<4 x i1> [[BITREVERSE]])
; CHECK-NEXT:    [[CAST:%.*]] = bitcast <4 x i1> [[BITREVERSE]] to i4
; CHECK-NEXT:    ret i4 [[CAST]]
;
  %bitreverse = shufflevector <4 x i1> %x, <4 x i1> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  call void @use(<4 x i1> %bitreverse)
  %cast = bitcast <4 x i1> %bitreverse to i4
  ret i4 %cast
}

define i32 @rev_i1(i1 %x) {
; CHECK-LABEL: @rev_i1(
; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[X:%.*]] to i32
; CHECK-NEXT:    call void @use_i32(i32 [[Z]])
; CHECK-NEXT:    [[R:%.*]] = select i1 [[X]], i32 -2147483648, i32 0
; CHECK-NEXT:    ret i32 [[R]]
;
  %z = zext i1 %x to i32
  call void @use_i32(i32 %z)
  %r = call i32 @llvm.bitreverse.i32(i32 %z)
  ret i32 %r
}

define <2 x i8> @rev_v2i1(<2 x i1> %x) {
; CHECK-LABEL: @rev_v2i1(
; CHECK-NEXT:    [[R:%.*]] = select <2 x i1> [[X:%.*]], <2 x i8> splat (i8 -128), <2 x i8> zeroinitializer
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %z = zext <2 x i1> %x to <2 x i8>
  %r = call <2 x i8> @llvm.bitreverse.v2i8(<2 x i8> %z)
  ret <2 x i8> %r
}

define i32 @rev_i2(i2 %x) {
; CHECK-LABEL: @rev_i2(
; CHECK-NEXT:    [[Z:%.*]] = zext i2 [[X:%.*]] to i32
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[Z]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %z = zext i2 %x to i32
  %r = call i32 @llvm.bitreverse.i32(i32 %z)
  ret i32 %r
}

; This used to cause an infinite loop.

define i64 @PR59897(i1 %X1_2) {
; CHECK-LABEL: @PR59897(
; CHECK-NEXT:    [[NOT_X1_2:%.*]] = xor i1 [[X1_2:%.*]], true
; CHECK-NEXT:    [[X0_3X2X5X0:%.*]] = zext i1 [[NOT_X1_2]] to i64
; CHECK-NEXT:    ret i64 [[X0_3X2X5X0]]
;
  %X1_3 = zext i1 %X1_2 to i32
  %X8_3x2x2x0 = call i32 @llvm.bitreverse.i32(i32 %X1_3)
  %X8_4x2x3x0 = xor i32 %X8_3x2x2x0, -1
  %X0_3x2x4x0 = lshr i32 %X8_4x2x3x0, 31
  %X0_3x2x5x0 = zext i32 %X0_3x2x4x0 to i64
  ret i64 %X0_3x2x5x0
}

; Fold: BITREVERSE( OP( BITREVERSE(x), y ) ) -> OP( x, BITREVERSE(y) )
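;
; (Note, not part of the original test: this holds for bitwise OP because
; bitreverse is a permutation of the bits, so it distributes over and/or/xor
; and bitreverse(bitreverse(x)) == x; e.g. rev(rev(a) ^ b) == a ^ rev(b).)
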
define i16 @rev_xor_lhs_rev16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @rev_xor_lhs_rev16(
; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[B:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[A:%.*]], [[TMP1]]
; CHECK-NEXT:    ret i16 [[TMP2]]
;
  %1 = tail call i16 @llvm.bitreverse.i16(i16 %a)
  %2 = xor i16 %1, %b
  %3 = tail call i16 @llvm.bitreverse.i16(i16 %2)
  ret i16 %3
}

define i32 @rev_and_rhs_rev32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @rev_and_rhs_rev32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %2 = and i32 %a, %1
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}

define i32 @rev_or_rhs_rev32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @rev_or_rhs_rev32(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bitreverse.i32(i32 [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %1 = tail call i32 @llvm.bitreverse.i32(i32 %b)
  %2 = or i32 %a, %1
  %3 = tail call i32 @llvm.bitreverse.i32(i32 %2)
  ret i32 %3
}

define i64 @rev_or_rhs_rev64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_or_rhs_rev64(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %2 = or i64 %a, %1
  %3 = tail call i64 @llvm.bitreverse.i64(i64 %2)
  ret i64 %3
}

define i64 @rev_xor_rhs_rev64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_xor_rhs_rev64(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bitreverse.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %2 = xor i64 %a, %1
  %3 = tail call i64 @llvm.bitreverse.i64(i64 %2)
  ret i64 %3
}

define <2 x i32> @rev_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @rev_xor_rhs_i32vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], [[B:%.*]]
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %1 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %b)
  %2 = xor <2 x i32> %a, %1
  %3 = tail call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %2)
  ret <2 x i32> %3
}

define i64 @rev_and_rhs_rev64_multiuse1(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_and_rhs_rev64_multiuse1(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]]
; CHECK-NEXT:    ret i64 [[TMP4]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %2 = and i64 %a, %1
  %3 = tail call i64 @llvm.bitreverse.i64(i64 %2)
  %4 = mul i64 %2, %3 ; increase use of the logical op
  ret i64 %4
}

define i64 @rev_and_rhs_rev64_multiuse2(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_and_rhs_rev64_multiuse2(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = mul i64 [[TMP1]], [[TMP3]]
; CHECK-NEXT:    ret i64 [[TMP4]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %2 = and i64 %a, %1
  %3 = tail call i64 @llvm.bitreverse.i64(i64 %2)
  %4 = mul i64 %1, %3 ; increase use of the inner bitreverse
  ret i64 %4
}

define i64 @rev_all_operand64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_all_operand64(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %a)
  %2 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %3 = and i64 %1, %2
  %4 = tail call i64 @llvm.bitreverse.i64(i64 %3)
  ret i64 %4
}

define i64 @rev_all_operand64_multiuse_both(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @rev_all_operand64_multiuse_both(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[TMP2:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]])
; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[A]], [[B]]
; CHECK-NEXT:    call void @use_i64(i64 [[TMP1]])
; CHECK-NEXT:    call void @use_i64(i64 [[TMP2]])
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %1 = tail call i64 @llvm.bitreverse.i64(i64 %a)
  %2 = tail call i64 @llvm.bitreverse.i64(i64 %b)
  %3 = and i64 %1, %2
  %4 = tail call i64 @llvm.bitreverse.i64(i64 %3)
  call void @use_i64(i64 %1)
  call void @use_i64(i64 %2)
  ret i64 %4
}

declare i32 @llvm.bswap.i32(i32)