; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -aggressive-instcombine -S | FileCheck %s

; PR37098 - https://bugs.llvm.org/show_bug.cgi?id=37098
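; The 'anyset' pattern ORs together shifted copies of %x and then masks off the
; low bit, so the result is 1 iff any of the tested bits is set. For example,
; with shift amounts {0, 3}: ((x | (x >> 3)) & 1) != 0 iff (x & 9) != 0, since
; (1 << 0) | (1 << 3) = 9. Each test below should reduce to that and+icmp+zext form.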
define i32 @anyset_two_bit_mask(i32 %x) {
; CHECK-LABEL: @anyset_two_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 9
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %s = lshr i32 %x, 3
  %o = or i32 %s, %x
  %r = and i32 %o, 1
  ret i32 %r
}

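; Shift amounts {0, 3, 5, 8} give the expected mask
; (1 << 0) | (1 << 3) | (1 << 5) | (1 << 8) = 1 + 8 + 32 + 256 = 297.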
define i32 @anyset_four_bit_mask(i32 %x) {
; CHECK-LABEL: @anyset_four_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 297
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %t1 = lshr i32 %x, 3
  %t2 = lshr i32 %x, 5
  %t3 = lshr i32 %x, 8
  %o1 = or i32 %t1, %x
  %o2 = or i32 %t2, %t3
  %o3 = or i32 %o1, %o2
  %r = and i32 %o3, 1
  ret i32 %r
}

; We're not testing the LSB here, so all of the 'or' operands are shifts.
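; With shift amounts {3, 5, 8}, the expected mask is 8 + 32 + 256 = 296,
; i.e. the previous test's mask without bit 0.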
define i32 @anyset_three_bit_mask_all_shifted_bits(i32 %x) {
; CHECK-LABEL: @anyset_three_bit_mask_all_shifted_bits(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 296
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %t1 = lshr i32 %x, 3
  %t2 = lshr i32 %x, 5
  %t3 = lshr i32 %x, 8
  %o2 = or i32 %t2, %t3
  %o3 = or i32 %o2, %t1
  %r = and i32 %o3, 1
  ret i32 %r
}

; Recognize the 'and' sibling pattern (all-bits-set). The 'and 1' may not be at the end.
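; For 'allset', every tested bit must be set, so the compare is equality against
; the mask itself: with bits {0, 7}, the result is 1 iff (x & 129) == 129, since
; (1 << 0) | (1 << 7) = 129.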
define i32 @allset_two_bit_mask(i32 %x) {
; CHECK-LABEL: @allset_two_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 129
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 129
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %s = lshr i32 %x, 7
  %o = and i32 %s, 1
  %r = and i32 %o, %x
  ret i32 %r
}

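; The tested bits are {1, 2, 3, 4}, so the mask is 2 + 4 + 8 + 16 = 30. Note the
; i64 type: the zext in the expected output must also widen to i64.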
define i64 @allset_four_bit_mask(i64 %x) {
; CHECK-LABEL: @allset_four_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 30
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 30
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %t1 = lshr i64 %x, 1
  %t2 = lshr i64 %x, 2
  %t3 = lshr i64 %x, 3
  %t4 = lshr i64 %x, 4
  %a1 = and i64 %t4, %t3
  %a2 = and i64 %t2, %a1
  %a3 = and i64 %a2, %t1
  %r = and i64 %a3, 1
  ret i64 %r
}

declare void @use(i32)

; negative test - extra use means the transform would increase instruction count
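; The fold would create and+icmp+zext (3 instructions), but the extra use of %o
; keeps the original lshr+and alive, so the function would grow from 3 to 5
; instructions.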
define i32 @allset_two_bit_mask_multiuse(i32 %x) {
; CHECK-LABEL: @allset_two_bit_mask_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr i32 [[X:%.*]], 7
; CHECK-NEXT:    [[O:%.*]] = and i32 [[S]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = and i32 [[O]], 1
; CHECK-NEXT:    call void @use(i32 [[O]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = lshr i32 %x, 7
  %o = and i32 %s, %x
  %r = and i32 %o, 1
  call void @use(i32 %o)
  ret i32 %r
}

; negative test - missing 'and 1' mask, so more than the low bit is used here
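; For example, with %x = 30 (0b11110): (x >> 1) & (x >> 2) & (x >> 3)
; = 15 & 7 & 3 = 3, but the masked-compare form could only produce 0 or 1,
; so the fold would be wrong here.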
define i8 @allset_three_bit_mask_no_and1(i8 %x) {
; CHECK-LABEL: @allset_three_bit_mask_no_and1(
; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[X:%.*]], 1
; CHECK-NEXT:    [[T2:%.*]] = lshr i8 [[X]], 2
; CHECK-NEXT:    [[T3:%.*]] = lshr i8 [[X]], 3
; CHECK-NEXT:    [[A2:%.*]] = and i8 [[T1]], [[T2]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A2]], [[T3]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %t1 = lshr i8 %x, 1
  %t2 = lshr i8 %x, 2
  %t3 = lshr i8 %x, 3
  %a2 = and i8 %t1, %t2
  %r = and i8 %a2, %t3
  ret i8 %r
}

; This test demonstrates that the matched pattern can be large. If the
; implementation is slow or explosive (e.g. stack overflow due to deep
; recursion), it should be made efficient.
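; The tested bits are {1..40}, so the expected mask is 2^41 - 2 = 2199023255550.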
define i64 @allset_40_bit_mask(i64 %x) {
; CHECK-LABEL: @allset_40_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 2199023255550
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 2199023255550
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %t1 = lshr i64 %x, 1
  %t2 = lshr i64 %x, 2
  %t3 = lshr i64 %x, 3
  %t4 = lshr i64 %x, 4
  %t5 = lshr i64 %x, 5
  %t6 = lshr i64 %x, 6
  %t7 = lshr i64 %x, 7
  %t8 = lshr i64 %x, 8
  %t9 = lshr i64 %x, 9
  %t10 = lshr i64 %x, 10
  %t11 = lshr i64 %x, 11
  %t12 = lshr i64 %x, 12
  %t13 = lshr i64 %x, 13
  %t14 = lshr i64 %x, 14
  %t15 = lshr i64 %x, 15
  %t16 = lshr i64 %x, 16
  %t17 = lshr i64 %x, 17
  %t18 = lshr i64 %x, 18
  %t19 = lshr i64 %x, 19
  %t20 = lshr i64 %x, 20
  %t21 = lshr i64 %x, 21
  %t22 = lshr i64 %x, 22
  %t23 = lshr i64 %x, 23
  %t24 = lshr i64 %x, 24
  %t25 = lshr i64 %x, 25
  %t26 = lshr i64 %x, 26
  %t27 = lshr i64 %x, 27
  %t28 = lshr i64 %x, 28
  %t29 = lshr i64 %x, 29
  %t30 = lshr i64 %x, 30
  %t31 = lshr i64 %x, 31
  %t32 = lshr i64 %x, 32
  %t33 = lshr i64 %x, 33
  %t34 = lshr i64 %x, 34
  %t35 = lshr i64 %x, 35
  %t36 = lshr i64 %x, 36
  %t37 = lshr i64 %x, 37
  %t38 = lshr i64 %x, 38
  %t39 = lshr i64 %x, 39
  %t40 = lshr i64 %x, 40
  %a1 = and i64 %t1, %t2
  %a2 = and i64 %t2, %a1
  %a3 = and i64 %t3, %a2
  %a4 = and i64 %t4, %a3
  %a5 = and i64 %t5, %a4
  %a6 = and i64 %t6, %a5
  %a7 = and i64 %t7, %a6
  %a8 = and i64 %t8, %a7
  %a9 = and i64 %t9, %a8
  %a10 = and i64 %t10, %a9
  %a11 = and i64 %t11, %a10
  %a12 = and i64 %t12, %a11
  %a13 = and i64 %t13, %a12
  %a14 = and i64 %t14, %a13
  %a15 = and i64 %t15, %a14
  %a16 = and i64 %t16, %a15
  %a17 = and i64 %t17, %a16
  %a18 = and i64 %t18, %a17
  %a19 = and i64 %t19, %a18
  %a20 = and i64 %t20, %a19
  %a21 = and i64 %t21, %a20
  %a22 = and i64 %t22, %a21
  %a23 = and i64 %t23, %a22
  %a24 = and i64 %t24, %a23
  %a25 = and i64 %t25, %a24
  %a26 = and i64 %t26, %a25
  %a27 = and i64 %t27, %a26
  %a28 = and i64 %t28, %a27
  %a29 = and i64 %t29, %a28
  %a30 = and i64 %t30, %a29
  %a31 = and i64 %t31, %a30
  %a32 = and i64 %t32, %a31
  %a33 = and i64 %t33, %a32
  %a34 = and i64 %t34, %a33
  %a35 = and i64 %t35, %a34
  %a36 = and i64 %t36, %a35
  %a37 = and i64 %t37, %a36
  %a38 = and i64 %t38, %a37
  %a39 = and i64 %t39, %a38
  %a40 = and i64 %t40, %a39
  %r = and i64 %a40, 1
  ret i64 %r
}

; Verify that unsimplified code doesn't crash:
; https://bugs.llvm.org/show_bug.cgi?id=37446
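; The shift amount (33) is not less than the i32 bit width, so %shr is poison
; and nothing here is expected to fold; the pass only has to tolerate the
; unsimplified input.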
define i32 @PR37446(i32 %x) {
; CHECK-LABEL: @PR37446(
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 1, 33
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 15
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[AND]], [[X:%.*]]
; CHECK-NEXT:    ret i32 [[AND1]]
;
  %shr = lshr i32 1, 33
  %and = and i32 %shr, 15
  %and1 = and i32 %and, %x
  ret i32 %and1
}