; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instcombine -S | FileCheck %s
; If we have some pattern that leaves only some low bits set and then
; left-shifts those bits, and if none of the bits that remain after the final
; shift are modified by the mask, we can omit the mask.
;
; There are many variants to this pattern:
;   c) (x & (-1 >> maskNbits)) << shiftNbits
; All these patterns can be simplified to just:
;   x << shiftNbits
; iff (shiftNbits-maskNbits) s>= 0 (i.e. shiftNbits u>= maskNbits)
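;
; As a concrete, purely illustrative instance with constant amounts (the value
; names %m, %a and %r are made up for this note and do not appear in the tests
; below), take i32 with maskNbits = shiftNbits = 8:
;   %m = lshr i32 -1, 8   ; %m = 0x00FFFFFF
;   %a = and i32 %m, %x   ; clears the high 8 bits of %x
;   %r = shl i32 %a, 8    ; those high 8 bits are shifted out anyway
; so %r is equivalent to just 'shl i32 %x, 8'.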

; Simple tests. We don't care about extra uses.

declare void @use32(i32)

define i32 @t0_basic(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl i32 %t1, %nbits
  ret i32 %t2
}

define i32 @t1_bigger_shift(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t1_bigger_shift(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], 1
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[X]], [[T2]]
; CHECK-NEXT: ret i32 [[T3]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  %t2 = add i32 %nbits, 1
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  call void @use32(i32 %t2)
  %t3 = shl i32 %t1, %t2
  ret i32 %t3
}

declare void @use3xi32(<3 x i32>)

define <3 x i32> @t2_vec_splat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat(
; CHECK-NEXT: [[T0:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and <3 x i32> [[T0]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 1, i32 1>
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
  %t0 = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, %nbits
  %t1 = and <3 x i32> %t0, %x
  %t2 = add <3 x i32> %nbits, <i32 1, i32 1, i32 1>
  call void @use3xi32(<3 x i32> %t0)
  call void @use3xi32(<3 x i32> %t1)
  call void @use3xi32(<3 x i32> %t2)
  %t3 = shl <3 x i32> %t1, %t2
  ret <3 x i32> %t3
}

define <3 x i32> @t3_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and <3 x i32> [[T0]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 0, i32 2>
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
  %t0 = lshr <3 x i32> <i32 -1, i32 -1, i32 -1>, %nbits
  %t1 = and <3 x i32> %t0, %x
  %t2 = add <3 x i32> %nbits, <i32 1, i32 0, i32 2>
  call void @use3xi32(<3 x i32> %t0)
  call void @use3xi32(<3 x i32> %t1)
  call void @use3xi32(<3 x i32> %t2)
  %t3 = shl <3 x i32> %t1, %t2
  ret <3 x i32> %t3
}

define <3 x i32> @t4_vec_undef(<3 x i32> %x, <3 x i32> %nbits) {
; CHECK-LABEL: @t4_vec_undef(
; CHECK-NEXT: [[T0:%.*]] = lshr <3 x i32> <i32 -1, i32 undef, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and <3 x i32> [[T0]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[NBITS]], <i32 1, i32 undef, i32 1>
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]])
; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl <3 x i32> [[X]], [[T2]]
; CHECK-NEXT: ret <3 x i32> [[T3]]
;
  %t0 = lshr <3 x i32> <i32 -1, i32 undef, i32 -1>, %nbits
  %t1 = and <3 x i32> %t0, %x
  %t2 = add <3 x i32> %nbits, <i32 1, i32 undef, i32 1>
  call void @use3xi32(<3 x i32> %t0)
  call void @use3xi32(<3 x i32> %t1)
  call void @use3xi32(<3 x i32> %t2)
  %t3 = shl <3 x i32> %t1, %t2
  ret <3 x i32> %t3
}

declare i32 @gen32()

define i32 @t5_commutativity0(i32 %nbits) {
; CHECK-LABEL: @t5_commutativity0(
; CHECK-NEXT: [[X:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[X]], [[T0]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %x = call i32 @gen32()
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %x, %t0 ; swapped
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl i32 %t1, %nbits
  ret i32 %t2
}

define i32 @t6_commutativity1(i32 %nbits0, i32 %nbits1) {
; CHECK-LABEL: @t6_commutativity1(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS0:%.*]]
; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS1:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[T1]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[NBITS0]]
; CHECK-NEXT: ret i32 [[T3]]
;
  %t0 = lshr i32 -1, %nbits0
  %t1 = lshr i32 -1, %nbits1
  %t2 = and i32 %t0, %t1 ; both hands of the 'and' could be the mask
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  call void @use32(i32 %t2)
  %t3 = shl i32 %t2, %nbits0
  ret i32 %t3
}

define i32 @t7_commutativity2(i32 %nbits0, i32 %nbits1) {
; CHECK-LABEL: @t7_commutativity2(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS0:%.*]]
; CHECK-NEXT: [[T1:%.*]] = lshr i32 -1, [[NBITS1:%.*]]
; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[T1]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T2]], [[NBITS1]]
; CHECK-NEXT: ret i32 [[T3]]
;
  %t0 = lshr i32 -1, %nbits0
  %t1 = lshr i32 -1, %nbits1
  %t2 = and i32 %t0, %t1 ; both hands of the 'and' could be the mask
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  call void @use32(i32 %t2)
  %t3 = shl i32 %t2, %nbits1
  ret i32 %t3
}

; No-wrap flags (nuw/nsw). We must not preserve them!
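;
; To see why (a purely illustrative walk-through, not one of the autogenerated
; checks): take %x = 0xFF000000 and %nbits = 8 in @t8_nuw below. Then
; %t0 = 0x00FFFFFF and %t1 = 0, so '%t2 = shl nuw i32 %t1, 8' is 0 and 'nuw'
; holds. After the mask is dropped, 'shl i32 %x, 8' shifts out non-zero bits,
; so a preserved 'nuw' would wrongly turn the result into poison.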

define i32 @t8_nuw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t8_nuw(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl nuw i32 %t1, %nbits
  ret i32 %t2
}

define i32 @t9_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t9_nsw(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl nsw i32 %t1, %nbits
  ret i32 %t2
}

define i32 @t10_nuw_nsw(i32 %x, i32 %nbits) {
; CHECK-LABEL: @t10_nuw_nsw(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[X]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl nuw nsw i32 %t1, %nbits
  ret i32 %t2
}

declare void @llvm.assume(i1 %cond)

; We can't simplify (%shiftnbits - %masknbits), but we have an assumption that
; %shiftnbits u>= %masknbits.
define i32 @t11_assume_uge(i32 %x, i32 %masknbits, i32 %shiftnbits) {
; CHECK-LABEL: @t11_assume_uge(
; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[SHIFTNBITS:%.*]], [[MASKNBITS:%.*]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[MASKNBITS]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[T1]], [[SHIFTNBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %cmp = icmp uge i32 %shiftnbits, %masknbits
  call void @llvm.assume(i1 %cmp)
  %t0 = lshr i32 -1, %masknbits
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl i32 %t1, %shiftnbits
  ret i32 %t2
}

define i32 @n12_not_minus_one(i32 %x, i32 %nbits) {
; CHECK-LABEL: @n12_not_minus_one(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -2, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: [[T2:%.*]] = shl i32 [[T1]], [[NBITS]]
; CHECK-NEXT: ret i32 [[T2]]
;
  %t0 = lshr i32 -2, %nbits ; shifting not '-1'
  %t1 = and i32 %t0, %x
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  %t2 = shl i32 %t1, %nbits
  ret i32 %t2
}

define i32 @n13_shamt_is_smaller(i32 %x, i32 %nbits) {
; CHECK-LABEL: @n13_shamt_is_smaller(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = add i32 [[NBITS]], -1
; CHECK-NEXT: call void @use32(i32 [[T0]])
; CHECK-NEXT: call void @use32(i32 [[T1]])
; CHECK-NEXT: call void @use32(i32 [[T2]])
; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]]
; CHECK-NEXT: ret i32 [[T3]]
;
  %t0 = lshr i32 -1, %nbits
  %t1 = and i32 %t0, %x
  %t2 = add i32 %nbits, -1
  call void @use32(i32 %t0)
  call void @use32(i32 %t1)
  call void @use32(i32 %t2)
  %t3 = shl i32 %t1, %t2 ; shift is smaller than mask
  ret i32 %t3
}