; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; sext of a zext is fully determined by the zero high bits, so the pair
; collapses to a single zext.
define i64 @test_sext_zext(i16 %A) {
; CHECK-LABEL: @test_sext_zext(
; CHECK-NEXT:    [[C2:%.*]] = zext i16 [[A:%.*]] to i64
; CHECK-NEXT:    ret i64 [[C2]]
;
  %c1 = zext i16 %A to i32
  %c2 = sext i32 %c1 to i64
  ret i64 %c2
}
; Vector not-then-zext is left as xor+zext (no further fold expected).
define <2 x i64> @test2(<2 x i1> %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %xor = xor <2 x i1> %A, <i1 true, i1 true>
  %zext = zext <2 x i1> %xor to <2 x i64>
  ret <2 x i64> %zext
}
; trunc+and+zext becomes a single wide 'and' with zero-extended constants.
define <2 x i64> @test3(<2 x i64> %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[ZEXT:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %zext = zext <2 x i32> %and to <2 x i64>
  ret <2 x i64> %zext
}
; trunc+and+xor+zext is rewritten as wide and+xor, eliminating the narrowing.
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT:    [[ZEXT:%.*]] = xor <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %xor = xor <2 x i32> %and, <i32 23, i32 42>
  %zext = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext
}
; The xor-by-1 between two zexts is pushed onto the i1, leaving one zext.
define i64 @fold_xor_zext_sandwich(i1 %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
; CHECK-NEXT:    ret i64 [[ZEXT2]]
;
  %zext1 = zext i1 %a to i32
  %xor = xor i32 %zext1, 1
  %zext2 = zext i32 %xor to i64
  ret i64 %zext2
}
; Vector version of the zext-xor-zext sandwich fold.
define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT2]]
;
  %zext1 = zext <2 x i1> %a to <2 x i32>
  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
  %zext2 = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext2
}
; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_and_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}
; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_or_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = or i8 %2, %4
  ret i8 %5
}
; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_xor_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = xor i8 %2, %4
  ret i8 %5
}
; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
; nested logical operators.

define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @fold_nested_logic_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[A]], [[D:%.*]]
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext i1 [[TMP5]] to i8
; CHECK-NEXT:    ret i8 [[TMP6]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  %6 = icmp eq i64 %a, %d
  %7 = zext i1 %6 to i8
  %8 = or i8 %5, %7
  ret i8 %8
}
; This test is for Integer BitWidth > 64 && BitWidth <= 1024.

define i1024 @sext_zext_apint1(i77 %A) {
; CHECK-LABEL: @sext_zext_apint1(
; CHECK-NEXT:    [[C2:%.*]] = zext i77 [[A:%.*]] to i1024
; CHECK-NEXT:    ret i1024 [[C2]]
;
  %c1 = zext i77 %A to i533
  %c2 = sext i533 %c1 to i1024
  ret i1024 %c2
}
; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.

define i47 @sext_zext_apint2(i11 %A) {
; CHECK-LABEL: @sext_zext_apint2(
; CHECK-NEXT:    [[C2:%.*]] = zext i11 [[A:%.*]] to i47
; CHECK-NEXT:    ret i47 [[C2]]
;
  %c1 = zext i11 %A to i39
  %c2 = sext i39 %c1 to i47
  ret i47 %c2
}
declare void @use1(i1)
declare void @use32(i32)

; zext(icmp ne (x & (1 << y)), 0) --> (x >> y) & 1
define i32 @masked_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 1
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; zext(icmp eq (x & (1 << y)), 0) --> (~x >> y) & 1, vector form.
define <2 x i32> @masked_bit_clear(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_clear(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP2:%.*]] = lshr <2 x i32> [[TMP1]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i32> [[TMP2]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
;
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %sh1, %x
  %cmp = icmp eq <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}
; Same fold with the 'and' operands commuted.
define <2 x i32> @masked_bit_set_commute(<2 x i32> %px, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_set_commute(
; CHECK-NEXT:    [[X:%.*]] = srem <2 x i32> <i32 42, i32 3>, [[PX:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 1, i32 1>
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %x = srem <2 x i32> <i32 42, i32 3>, %px ; thwart complexity-based canonicalization
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %x, %sh1
  %cmp = icmp ne <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}
; Scalar bit-clear fold with the 'and' operands commuted.
define i32 @masked_bit_clear_commute(i32 %px, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_commute(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[PX:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], 1
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %x = srem i32 42, %px ; thwart complexity-based canonicalization
  %sh1 = shl i32 1, %y
  %and = and i32 %x, %sh1
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Extra use of the shift is OK; the fold still fires.
define i32 @masked_bit_set_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use1(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[SH1]])
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 1
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - extra use of the 'and' blocks the fold.
define i32 @masked_bit_set_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use2(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[AND]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - extra use of the 'icmp' blocks the fold.
define i32 @masked_bit_set_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use3(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Extra use of the shift is OK for the bit-clear form as well.
define i32 @masked_bit_clear_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use1(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[SH1]])
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], 1
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - extra use of the 'and' blocks the fold.
define i32 @masked_bit_clear_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use2(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[AND]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - extra use of the 'icmp' blocks the fold.
define i32 @masked_bit_clear_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use3(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - the shifted constant must be a single bit (3 is not).
define i32 @masked_bits_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bits_set(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 3, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 3, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - the mask must come from 'and', not 'sdiv'.
define i32 @div_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @div_bit_set(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = sdiv i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = sdiv i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - the compare must be against zero.
define i32 @masked_bit_set_nonzero_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_nonzero_cmp(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 1
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 1
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test - the predicate must be eq/ne, not a signed compare.
define i32 @masked_bit_wrong_pred(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_wrong_pred(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp sgt i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Assert that zext(or(masked_bit_test, icmp)) can be correctly transformed to
; or(shifted_masked_bit, zext(icmp))

define void @zext_or_masked_bit_test(i32 %a, i32 %b, i32* %p) {
; CHECK-LABEL: @zext_or_masked_bit_test
; CHECK-NEXT:    [[LD:%.*]] = load i32, i32* %p, align 4
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[LD]], %b
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 %a, %b
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 1
; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    [[OR:%.*]] = or i32 [[AND]], [[EXT]]
; CHECK-NEXT:    store i32 [[OR]], i32* %p, align 4
; CHECK-NEXT:    ret void
  %ld = load i32, i32* %p, align 4
  %shl = shl i32 1, %b
  %and = and i32 %shl, %a
  %tobool = icmp ne i32 %and, 0
  %cmp = icmp eq i32 %ld, %b
  %or = or i1 %tobool, %cmp
  %conv = zext i1 %or to i32
  store i32 %conv, i32* %p, align 4
  ret void
}