; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; With left shift, the 'and' is folded away but the comparison survives
; (shl+and+icmp becomes shl+icmp against a rescaled constant).
define i1 @test_shift_and_cmp_not_changed1(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed1(
; CHECK-NEXT:    [[SHLP:%.*]] = shl i8 [[P:%.*]], 5
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[SHLP]], 64
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
; With arithmetic right shift, the masked result is always in {-64, 0},
; so the signed compare against 32 folds to true.
define i1 @test_shift_and_cmp_not_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_not_changed2(
; CHECK-NEXT:    ret i1 true
;
  %shlp = ashr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
; This should simplify functionally to the left shift case.
; The extra input parameter should be optimized away.
define i1 @test_shift_and_cmp_changed1(i8 %p, i8 %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1(
; CHECK-NEXT:    [[ANDP:%.*]] = shl i8 [[P:%.*]], 5
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ANDP]], 33
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %andp = and i8 %p, 6
  %andq = and i8 %q, 8
  %or = or i8 %andq, %andp
  %shl = shl i8 %or, 5
  %ashr = ashr i8 %shl, 5
  %cmp = icmp slt i8 %ashr, 1
  ret i1 %cmp
}
; Vector splat version of test_shift_and_cmp_changed1.
define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
; CHECK-NEXT:    [[ANDP:%.*]] = shl <2 x i8> [[P:%.*]], splat (i8 5)
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i8> [[ANDP]], splat (i8 33)
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %andp = and <2 x i8> %p, <i8 6, i8 6>
  %andq = and <2 x i8> %q, <i8 8, i8 8>
  %or = or <2 x i8> %andq, %andp
  %shl = shl <2 x i8> %or, <i8 5, i8 5>
  %ashr = ashr <2 x i8> %shl, <i8 5, i8 5>
  %cmp = icmp slt <2 x i8> %ashr, <i8 1, i8 1>
  ret <2 x i1> %cmp
}
; Unsigned compare allows a transformation to compare against 0.
define i1 @test_shift_and_cmp_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2(
; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[P:%.*]], 6
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp ult i8 %andp, 32
  ret i1 %cmp
}
; Vector splat version of test_shift_and_cmp_changed2.
define <2 x i1> @test_shift_and_cmp_changed2_vec(<2 x i8> %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[P:%.*]], splat (i8 6)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[TMP1]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[CMP]]
;
  %shlp = shl <2 x i8> %p, <i8 5, i8 5>
  %andp = and <2 x i8> %shlp, <i8 -64, i8 -64>
  %cmp = icmp ult <2 x i8> %andp, <i8 32, i8 32>
  ret <2 x i1> %cmp
}
; nsw on the shift should not affect the comparison.
define i1 @test_shift_and_cmp_changed3(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed3(
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[P:%.*]], 2
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %shlp = shl nsw i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}
; Logical shift right allows a return true because the 'and' guarantees no bits are set.
define i1 @test_shift_and_cmp_changed4(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed4(
; CHECK-NEXT:    ret i1 true
;
  %shlp = lshr i8 %p, 5
  %andp = and i8 %shlp, -64
  %cmp = icmp slt i8 %andp, 32
  ret i1 %cmp
}