; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"

declare i32 @abs(i32)
declare i64 @labs(i64)
declare i64 @llabs(i64)

; Test that the abs library call simplifier works correctly.
; abs(x) -> x <s 0 ? -x : x.

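; (Note: these patterns are canonicalized to the @llvm.abs intrinsic; its i1
; argument is the "int min is poison" flag, which can be true here because C's
; abs/labs/llabs have undefined behavior on INT_MIN.)
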
define i32 @test_abs(i32 %x) {
; CHECK-LABEL: @test_abs(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %ret = call i32 @abs(i32 %x)
  ret i32 %ret
}

define i64 @test_labs(i64 %x) {
; CHECK-LABEL: @test_labs(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.abs.i64(i64 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %ret = call i64 @labs(i64 %x)
  ret i64 %ret
}

define i64 @test_llabs(i64 %x) {
; CHECK-LABEL: @test_llabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.abs.i64(i64 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %ret = call i64 @llabs(i64 %x)
  ret i64 %ret
}

; We have a canonical form of abs to make CSE easier.

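; (Illustrative note: both "select (icmp sgt x, 0), x, (sub 0, x)" and
; "select (icmp slt x, 0), (sub 0, x), x" compute abs(x); folding every
; spelling to @llvm.abs lets CSE see them as the same value.)
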
define i8 @abs_canonical_1(i8 %x) {
; CHECK-LABEL: @abs_canonical_1(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %cmp = icmp sgt i8 %x, 0
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}

; Vectors should work too.

define <2 x i8> @abs_canonical_2(<2 x i8> %x) {
; CHECK-LABEL: @abs_canonical_2(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %x, <2 x i8> %neg
  ret <2 x i8> %abs
}

; Even if a constant has undef elements.

define <2 x i8> @abs_canonical_2_vec_undef_elts(<2 x i8> %x) {
; CHECK-LABEL: @abs_canonical_2_vec_undef_elts(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 undef, i8 -1>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %x, <2 x i8> %neg
  ret <2 x i8> %abs
}

; NSW should not change.

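; (With "sub nsw", negating INT_MIN is poison, so the canonical form below may
; legitimately use the int-min-is-poison variant of the intrinsic, i1 true.)
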
define i8 @abs_canonical_3(i8 %x) {
; CHECK-LABEL: @abs_canonical_3(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %cmp = icmp slt i8 %x, 0
  %neg = sub nsw i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}

define i8 @abs_canonical_4(i8 %x) {
; CHECK-LABEL: @abs_canonical_4(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %cmp = icmp slt i8 %x, 1
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}

define i32 @abs_canonical_5(i8 %x) {
; CHECK-LABEL: @abs_canonical_5(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %cmp = icmp sgt i8 %x, 0
  %conv = sext i8 %x to i32
  %neg = sub i32 0, %conv
  %abs = select i1 %cmp, i32 %conv, i32 %neg
  ret i32 %abs
}

define i32 @abs_canonical_6(i32 %a, i32 %b) {
; CHECK-LABEL: @abs_canonical_6(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %t1 = sub i32 %a, %b
  %cmp = icmp sgt i32 %t1, -1
  %t2 = sub i32 %b, %a
  %abs = select i1 %cmp, i32 %t1, i32 %t2
  ret i32 %abs
}

define <2 x i8> @abs_canonical_7(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @abs_canonical_7(
; CHECK-NEXT:    [[T1:%.*]] = sub <2 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[T1]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %t1 = sub <2 x i8> %a, %b
  %cmp = icmp sgt <2 x i8> %t1, <i8 -1, i8 -1>
  %t2 = sub <2 x i8> %b, %a
  %abs = select <2 x i1> %cmp, <2 x i8> %t1, <2 x i8> %t2
  ret <2 x i8> %abs
}

define i32 @abs_canonical_8(i32 %a) {
; CHECK-LABEL: @abs_canonical_8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[A:%.*]], i1 false)
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %t = sub i32 0, %a
  %cmp = icmp slt i32 %t, 0
  %abs = select i1 %cmp, i32 %a, i32 %t
  ret i32 %abs
}

define i32 @abs_canonical_9(i32 %a, i32 %b) {
; CHECK-LABEL: @abs_canonical_9(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[T2:%.*]] = sub i32 [[B]], [[A]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[TMP1]], [[T2]]
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %t1 = sub i32 %a, %b
  %cmp = icmp sgt i32 %t1, -1
  %t2 = sub i32 %b, %a
  %abs = select i1 %cmp, i32 %t1, i32 %t2
  %add = add i32 %abs, %t2 ; increase use count for %t2.
  ret i32 %add
}

define i32 @abs_canonical_10(i32 %a, i32 %b) {
; CHECK-LABEL: @abs_canonical_10(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    ret i32 [[TMP1]]
;
  %t2 = sub i32 %b, %a
  %t1 = sub i32 %a, %b
  %cmp = icmp sgt i32 %t1, -1
  %abs = select i1 %cmp, i32 %t1, i32 %t2
  ret i32 %abs
}

; We have a canonical form of nabs to make CSE easier.

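; (As the CHECK lines below show, nabs(x) is expressed as a negated @llvm.abs:
; "sub 0, (call @llvm.abs(x))".)
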
define i8 @nabs_canonical_1(i8 %x) {
; CHECK-LABEL: @nabs_canonical_1(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp sgt i8 %x, 0
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %neg, i8 %x
  ret i8 %abs
}

; Vectors should work too.

define <2 x i8> @nabs_canonical_2(<2 x i8> %x) {
; CHECK-LABEL: @nabs_canonical_2(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 -1, i8 -1>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %neg, <2 x i8> %x
  ret <2 x i8> %abs
}

; Even if a constant has undef elements.

define <2 x i8> @nabs_canonical_2_vec_undef_elts(<2 x i8> %x) {
; CHECK-LABEL: @nabs_canonical_2_vec_undef_elts(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %cmp = icmp sgt <2 x i8> %x, <i8 -1, i8 undef>
  %neg = sub <2 x i8> zeroinitializer, %x
  %abs = select <2 x i1> %cmp, <2 x i8> %neg, <2 x i8> %x
  ret <2 x i8> %abs
}

; NSW should not change.

define i8 @nabs_canonical_3(i8 %x) {
; CHECK-LABEL: @nabs_canonical_3(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 0
  %neg = sub nsw i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}

define i8 @nabs_canonical_4(i8 %x) {
; CHECK-LABEL: @nabs_canonical_4(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %cmp = icmp slt i8 %x, 1
  %neg = sub i8 0, %x
  %abs = select i1 %cmp, i8 %x, i8 %neg
  ret i8 %abs
}

define i32 @nabs_canonical_5(i8 %x) {
; CHECK-LABEL: @nabs_canonical_5(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT:    [[ABS:%.*]] = sub nsw i32 0, [[TMP2]]
; CHECK-NEXT:    ret i32 [[ABS]]
;
  %cmp = icmp sgt i8 %x, 0
  %conv = sext i8 %x to i32
  %neg = sub i32 0, %conv
  %abs = select i1 %cmp, i32 %neg, i32 %conv
  ret i32 %abs
}

define i32 @nabs_canonical_6(i32 %a, i32 %b) {
; CHECK-LABEL: @nabs_canonical_6(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i32 0, [[TMP1]]
; CHECK-NEXT:    ret i32 [[ABS]]
;
  %t1 = sub i32 %a, %b
  %cmp = icmp sgt i32 %t1, -1
  %t2 = sub i32 %b, %a
  %abs = select i1 %cmp, i32 %t2, i32 %t1
  ret i32 %abs
}

define <2 x i8> @nabs_canonical_7(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @nabs_canonical_7(
; CHECK-NEXT:    [[T1:%.*]] = sub <2 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[T1]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[ABS]]
;
  %t1 = sub <2 x i8> %a, %b
  %cmp = icmp sgt <2 x i8> %t1, <i8 -1, i8 -1>
  %t2 = sub <2 x i8> %b, %a
  %abs = select <2 x i1> %cmp, <2 x i8> %t2, <2 x i8> %t1
  ret <2 x i8> %abs
}

define i32 @nabs_canonical_8(i32 %a) {
; CHECK-LABEL: @nabs_canonical_8(
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[A:%.*]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i32 0, [[TMP1]]
; CHECK-NEXT:    ret i32 [[ABS]]
;
  %t = sub i32 0, %a
  %cmp = icmp slt i32 %t, 0
  %abs = select i1 %cmp, i32 %t, i32 %a
  ret i32 %abs
}

define i32 @nabs_canonical_9(i32 %a, i32 %b) {
; CHECK-LABEL: @nabs_canonical_9(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[TMP1]], [[A]]
; CHECK-NEXT:    [[ADD:%.*]] = sub i32 [[B]], [[TMP2]]
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %t1 = sub i32 %a, %b
  %cmp = icmp sgt i32 %t1, -1
  %t2 = sub i32 %b, %a
  %abs = select i1 %cmp, i32 %t2, i32 %t1
  %add = add i32 %t2, %abs ; increase use count for %t2
  ret i32 %add
}

define i32 @nabs_canonical_10(i32 %a, i32 %b) {
; CHECK-LABEL: @nabs_canonical_10(
; CHECK-NEXT:    [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false)
; CHECK-NEXT:    [[ABS:%.*]] = sub i32 0, [[TMP1]]
; CHECK-NEXT:    ret i32 [[ABS]]
;
  %t2 = sub i32 %b, %a
  %t1 = sub i32 %a, %b
  %cmp = icmp slt i32 %t1, 1
  %abs = select i1 %cmp, i32 %t1, i32 %t2
  ret i32 %abs
}

; The following 5 tests use a shift+add+xor to implement abs():
; B = ashr i8 A, 7  -- smear the sign bit.
; xor (add A, B), B -- add -1 and flip bits if negative

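; Worked example (illustrative), for i8 A = -5 (0xFB):
;   B   = ashr i8 -5, 7 = -1 (0xFF)
;   add = -5 + -1       = -6 (0xFA)
;   xor = -6 ^ -1       =  5 (bitwise-not of -6)
; For non-negative A, B = 0 and the add and xor are both no-ops.
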
define i8 @shifty_abs_commute0(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %signbit = ashr i8 %x, 7
  %add = add i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}

define i8 @shifty_abs_commute0_nsw(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0_nsw(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %signbit = ashr i8 %x, 7
  %add = add nsw i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}

; The nuw flag creates a contradiction. If the shift produces all 1s, the only
; way for the add to not wrap is for %x to be 0, but then the shift couldn't
; have produced all 1s. We partially optimize this.

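; For example, x = -1 gives %signbit = -1, and "add nuw i8 -1, -1" wraps when
; interpreted as unsigned (0xFF + 0xFF), so every negative input is poison and
; the fold only has to be correct for x >= 0, where it returns x itself.
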
define i8 @shifty_abs_commute0_nuw(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute0_nuw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i8 [[X:%.*]], 0
; CHECK-NEXT:    [[ABS:%.*]] = select i1 [[TMP1]], i8 [[X]], i8 0
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add nuw i8 %signbit, %x
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}

define <2 x i8> @shifty_abs_commute1(<2 x i8> %x) {
; CHECK-LABEL: @shifty_abs_commute1(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %signbit = ashr <2 x i8> %x, <i8 7, i8 7>
  %add = add <2 x i8> %signbit, %x
  %abs = xor <2 x i8> %signbit, %add
  ret <2 x i8> %abs
}

define <2 x i8> @shifty_abs_commute2(<2 x i8> %x) {
; CHECK-LABEL: @shifty_abs_commute2(
; CHECK-NEXT:    [[Y:%.*]] = mul <2 x i8> [[X:%.*]], <i8 3, i8 3>
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[Y]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %y = mul <2 x i8> %x, <i8 3, i8 3> ; extra op to thwart complexity-based canonicalization
  %signbit = ashr <2 x i8> %y, <i8 7, i8 7>
  %add = add <2 x i8> %y, %signbit
  %abs = xor <2 x i8> %signbit, %add
  ret <2 x i8> %abs
}

define i8 @shifty_abs_commute3(i8 %x) {
; CHECK-LABEL: @shifty_abs_commute3(
; CHECK-NEXT:    [[Y:%.*]] = mul i8 [[X:%.*]], 3
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[Y]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %y = mul i8 %x, 3 ; extra op to thwart complexity-based canonicalization
  %signbit = ashr i8 %y, 7
  %add = add i8 %y, %signbit
  %abs = xor i8 %add, %signbit
  ret i8 %abs
}

; Negative test - don't transform if it would increase instruction count.

declare void @extra_use(i8)
declare void @extra_use_i1(i1)

define i8 @shifty_abs_too_many_uses(i8 %x) {
; CHECK-LABEL: @shifty_abs_too_many_uses(
; CHECK-NEXT:    [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7
; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[SIGNBIT]], [[X]]
; CHECK-NEXT:    [[ABS:%.*]] = xor i8 [[ADD]], [[SIGNBIT]]
; CHECK-NEXT:    call void @extra_use(i8 [[SIGNBIT]])
; CHECK-NEXT:    ret i8 [[ABS]]
;
  %signbit = ashr i8 %x, 7
  %add = add i8 %x, %signbit
  %abs = xor i8 %add, %signbit
  call void @extra_use(i8 %signbit)
  ret i8 %abs
}

; There's another way to make abs() using shift, xor, and subtract.
; PR36036 - https://bugs.llvm.org/show_bug.cgi?id=36036

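; Worked example (illustrative), for i8 x = -5:
;   sh  = ashr i8 -5, 7 = -1
;   xor = -5 ^ -1       =  4 (bitwise-not of -5)
;   r   = 4 - -1        =  5
; For negative x this computes (~x) + 1 = -x; for x >= 0 both ops are no-ops.
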
define i8 @shifty_sub(i8 %x) {
; CHECK-LABEL: @shifty_sub(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %sh = ashr i8 %x, 7
  %xor = xor i8 %x, %sh
  %r = sub i8 %xor, %sh
  ret i8 %r
}

define i8 @shifty_sub_nsw_commute(i8 %x) {
; CHECK-LABEL: @shifty_sub_nsw_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 true)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %sh = ashr i8 %x, 7
  %xor = xor i8 %sh, %x
  %r = sub nsw i8 %xor, %sh
  ret i8 %r
}

define <4 x i32> @shifty_sub_nuw_vec_commute(<4 x i32> %x) {
; CHECK-LABEL: @shifty_sub_nuw_vec_commute(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt <4 x i32> [[X:%.*]], zeroinitializer
; CHECK-NEXT:    [[R:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[X]], <4 x i32> zeroinitializer
; CHECK-NEXT:    ret <4 x i32> [[R]]
;
  %sh = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
  %xor = xor <4 x i32> %sh, %x
  %r = sub nuw <4 x i32> %xor, %sh
  ret <4 x i32> %r
}

define i12 @shifty_sub_nsw_nuw(i12 %x) {
; CHECK-LABEL: @shifty_sub_nsw_nuw(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i12 [[X:%.*]], 0
; CHECK-NEXT:    [[R:%.*]] = select i1 [[TMP1]], i12 [[X]], i12 0
; CHECK-NEXT:    ret i12 [[R]]
;
  %sh = ashr i12 %x, 11
  %xor = xor i12 %x, %sh
  %r = sub nsw nuw i12 %xor, %sh
  ret i12 %r
}

define i8 @negate_abs(i8 %x) {
; CHECK-LABEL: @negate_abs(
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[R:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %n = sub i8 0, %x
  %c = icmp slt i8 %x, 0
  %s = select i1 %c, i8 %n, i8 %x
  %r = sub i8 0, %s
  ret i8 %r
}

define <2 x i8> @negate_nabs(<2 x i8> %x) {
; CHECK-LABEL: @negate_nabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[X:%.*]], i1 false)
; CHECK-NEXT:    ret <2 x i8> [[TMP1]]
;
  %n = sub <2 x i8> zeroinitializer, %x
  %c = icmp slt <2 x i8> %x, zeroinitializer
  %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %n
  %r = sub <2 x i8> zeroinitializer, %s
  ret <2 x i8> %r
}

define i1 @abs_must_be_positive(i32 %x) {
; CHECK-LABEL: @abs_must_be_positive(
; CHECK-NEXT:    ret i1 true
;
  %negx = sub nsw i32 0, %x
  %c = icmp sge i32 %x, 0
  %sel = select i1 %c, i32 %x, i32 %negx
  %c2 = icmp sge i32 %sel, 0
  ret i1 %c2
}

define i8 @abs_swapped(i8 %a) {
; CHECK-LABEL: @abs_swapped(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[NEG]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[A]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %neg = sub i8 0, %a
  call void @extra_use(i8 %neg)
  %cmp1 = icmp sgt i8 %a, 0
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  ret i8 %m1
}

define i8 @nabs_swapped(i8 %a) {
; CHECK-LABEL: @nabs_swapped(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[NEG]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[A]], i1 false)
; CHECK-NEXT:    [[M2:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[M2]]
;
  %neg = sub i8 0, %a
  call void @extra_use(i8 %neg)
  %cmp2 = icmp sgt i8 %a, 0
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  ret i8 %m2
}

define i8 @abs_different_constants(i8 %a) {
; CHECK-LABEL: @abs_different_constants(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[NEG]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[A]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %neg = sub i8 0, %a
  call void @extra_use(i8 %neg)
  %cmp1 = icmp sgt i8 %a, -1
  %m1 = select i1 %cmp1, i8 %a, i8 %neg
  ret i8 %m1
}

define i8 @nabs_different_constants(i8 %a) {
; CHECK-LABEL: @nabs_different_constants(
; CHECK-NEXT:    [[NEG:%.*]] = sub i8 0, [[A:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[NEG]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[A]], i1 false)
; CHECK-NEXT:    [[M2:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[M2]]
;
  %neg = sub i8 0, %a
  call void @extra_use(i8 %neg)
  %cmp2 = icmp sgt i8 %a, -1
  %m2 = select i1 %cmp2, i8 %neg, i8 %a
  ret i8 %m2
}

@g = external global i64

; PR45539 - https://bugs.llvm.org/show_bug.cgi?id=45539

define i64 @infinite_loop_constant_expression_abs(i64 %arg) {
; CHECK-LABEL: @infinite_loop_constant_expression_abs(
; CHECK-NEXT:    [[T:%.*]] = sub i64 ptrtoint (i64* @g to i64), [[ARG:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.abs.i64(i64 [[T]], i1 true)
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %t = sub i64 ptrtoint (i64* @g to i64), %arg
  %t1 = icmp slt i64 %t, 0
  %t2 = sub nsw i64 0, %t
  %t3 = select i1 %t1, i64 %t2, i64 %t
  ret i64 %t3
}

define i8 @abs_extra_use_icmp(i8 %x) {
; CHECK-LABEL: @abs_extra_use_icmp(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    call void @extra_use_i1(i1 [[C]])
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[N]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp slt i8 %x, 0
  call void @extra_use_i1(i1 %c)
  %n = sub i8 0, %x
  %s = select i1 %c, i8 %n, i8 %x
  ret i8 %s
}

define i8 @abs_extra_use_sub(i8 %x) {
; CHECK-LABEL: @abs_extra_use_sub(
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[N]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
; CHECK-NEXT:    ret i8 [[TMP1]]
;
  %c = icmp slt i8 %x, 0
  %n = sub i8 0, %x
  call void @extra_use(i8 %n)
  %s = select i1 %c, i8 %n, i8 %x
  ret i8 %s
}

define i8 @abs_extra_use_icmp_sub(i8 %x) {
; CHECK-LABEL: @abs_extra_use_icmp_sub(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    call void @extra_use_i1(i1 [[C]])
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    call void @extra_use(i8 [[N]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[N]], i8 [[X]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp slt i8 %x, 0
  call void @extra_use_i1(i1 %c)
  %n = sub i8 0, %x
  call void @extra_use(i8 %n)
  %s = select i1 %c, i8 %n, i8 %x
  ret i8 %s
}

define i8 @nabs_extra_use_icmp(i8 %x) {
; CHECK-LABEL: @nabs_extra_use_icmp(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    call void @extra_use_i1(i1 [[C]])
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[N]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp slt i8 %x, 0
  call void @extra_use_i1(i1 %c)
  %n = sub i8 0, %x
  %s = select i1 %c, i8 %x, i8 %n
  ret i8 %s
}

define i8 @nabs_extra_use_sub(i8 %x) {
; CHECK-LABEL: @nabs_extra_use_sub(
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X:%.*]]
; CHECK-NEXT:    call void @extra_use(i8 [[N]])
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
; CHECK-NEXT:    [[S:%.*]] = sub i8 0, [[TMP1]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp slt i8 %x, 0
  %n = sub i8 0, %x
  call void @extra_use(i8 %n)
  %s = select i1 %c, i8 %x, i8 %n
  ret i8 %s
}

define i8 @nabs_extra_use_icmp_sub(i8 %x) {
; CHECK-LABEL: @nabs_extra_use_icmp_sub(
; CHECK-NEXT:    [[C:%.*]] = icmp slt i8 [[X:%.*]], 0
; CHECK-NEXT:    call void @extra_use_i1(i1 [[C]])
; CHECK-NEXT:    [[N:%.*]] = sub i8 0, [[X]]
; CHECK-NEXT:    call void @extra_use(i8 [[N]])
; CHECK-NEXT:    [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[N]]
; CHECK-NEXT:    ret i8 [[S]]
;
  %c = icmp slt i8 %x, 0
  call void @extra_use_i1(i1 %c)
  %n = sub i8 0, %x
  call void @extra_use(i8 %n)
  %s = select i1 %c, i8 %x, i8 %n
  ret i8 %s
}