; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; https://bugs.llvm.org/show_bug.cgi?id=38149

; Pattern:
;   ((%x << MaskedBits) a>> MaskedBits) == %x
; Should be transformed into:
;   (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
; Where KeptBits = bitwidth(%x) - MaskedBits

; ============================================================================ ;
; Basic positive tests
; ============================================================================ ;
; Scalar positive test: (x << 5) a>> 5 == x with i8 gives KeptBits = 3,
; so this must fold to: (x + 4) u< 8.
define i1 @p0(i8 %x) {
; CHECK-LABEL: @p0(
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X:%.*]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Big unusual bit width, https://bugs.llvm.org/show_bug.cgi?id=38204
; With i65 and MaskedBits = 1 the fold produces an add of 2^63 followed by a
; sign check (icmp sgt -1) instead of the generic unsigned-range compare.
define i1 @pb(i65 %x) {
; CHECK-LABEL: @pb(
; CHECK-NEXT:    [[TMP1:%.*]] = add i65 [[X:%.*]], 9223372036854775808
; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i65 [[TMP1]], -1
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i65 %x, 1
  %tmp1 = ashr exact i65 %tmp0, 1
  %tmp2 = icmp eq i65 %x, %tmp1
  ret i1 %tmp2
}
; ============================================================================ ;
; Vector tests
; ============================================================================ ;
; Splat vector: the fold applies element-wise with identical constants.
define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
; CHECK-LABEL: @p1_vec_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult <2 x i8> [[TMP1]], <i8 8, i8 8>
; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
;
  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 5>
  %tmp2 = icmp eq <2 x i8> %tmp1, %x
  ret <2 x i1> %tmp2
}
; Non-splat shift amounts: the fold does not apply (CHECK lines show the
; original pattern surviving unchanged).
define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
; CHECK-LABEL: @p2_vec_nonsplat(
; CHECK-NEXT:    [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 6>
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
; CHECK-NEXT:    ret <2 x i1> [[TMP2]]
;
  %tmp0 = shl <2 x i8> %x, <i8 5, i8 6>
  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 6>
  %tmp2 = icmp eq <2 x i8> %tmp1, %x
  ret <2 x i1> %tmp2
}
; Undef lane in the shl amount only: not folded (pattern survives).
define <3 x i1> @p3_vec_undef0(<3 x i8> %x) {
; CHECK-LABEL: @p3_vec_undef0(
; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 5, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}
; Undef lane in the ashr amount only: not folded (pattern survives).
define <3 x i1> @p4_vec_undef1(<3 x i8> %x) {
; CHECK-LABEL: @p4_vec_undef1(
; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 5, i8 5>
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 5, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}
; Matching undef lanes in both shift amounts: still not folded.
define <3 x i1> @p5_vec_undef2(<3 x i8> %x) {
; CHECK-LABEL: @p5_vec_undef2(
; CHECK-NEXT:    [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT:    ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}
109 ; ============================================================================ ;
110 ; Commutativity tests.
111 ; ============================================================================ ;
; %x comes from a call so the icmp operand order is canonical-swapped;
; the fold must still fire. NOTE(review): the `define` and the @gen8
; declaration were truncated in this copy and are reconstructed from the
; CHECK lines — confirm against upstream.
declare i8 @gen8()

define i1 @c0() {
; CHECK-LABEL: @c0(
; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %x = call i8 @gen8()
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %x, %tmp1 ; swapped order
  ret i1 %tmp2
}
; ============================================================================ ;
; One-use tests.
; ============================================================================ ;
declare void @use8(i8)

; Extra use of the shl only: still folded (the add/ult form is emitted
; alongside the kept shl).
define i1 @n_oneuse0(i8 %x) {
; CHECK-LABEL: @n_oneuse0(
; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[X]], 4
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  call void @use8(i8 %tmp0)
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Extra use of the ashr: the whole chain must be kept, so no fold.
define i1 @n_oneuse1(i8 %x) {
; CHECK-LABEL: @n_oneuse1(
; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  call void @use8(i8 %tmp1)
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Extra uses of both shifts: no fold.
define i1 @n_oneuse2(i8 %x) {
; CHECK-LABEL: @n_oneuse2(
; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  call void @use8(i8 %tmp0)
  %tmp1 = ashr exact i8 %tmp0, 5
  call void @use8(i8 %tmp1)
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; ============================================================================ ;
; Negative tests
; ============================================================================ ;
; Mismatched shift amounts (5 vs 3): not the sign-truncation pattern, no fold.
define i1 @n0(i8 %x) {
; CHECK-LABEL: @n0(
; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 3
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 3 ; not 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; lshr instead of ashr: this is the UNSIGNED truncation check, which folds
; to a plain ult against 2^KeptBits instead.
define i1 @n1(i8 %x) {
; CHECK-LABEL: @n1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X:%.*]], 8
; CHECK-NEXT:    ret i1 [[TMP1]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = lshr exact i8 %tmp0, 5 ; not ashr
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Compared against a different value (%y, not %x): no fold.
define i1 @n2(i8 %x, i8 %y) {
; CHECK-LABEL: @n2(
; CHECK-NEXT:    [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT:    [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %y ; not %x
  ret i1 %tmp2
}
223 define <2 x i1> @n3_vec_nonsplat(<2 x i8> %x) {
224 ; CHECK-LABEL: @n3_vec_nonsplat(
225 ; CHECK-NEXT: [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 5>
226 ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 3>
227 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
228 ; CHECK-NEXT: ret <2 x i1> [[TMP2]]
230 %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
231 %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 3> ; 3 instead of 5
232 %tmp2 = icmp eq <2 x i8> %tmp1, %x