; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1
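; The tests here all exercise the same pattern: the overflow bit of
; @llvm.smul.with.overflow is 'and'ed with a check that one of the multiplied
; values is non-zero. A signed multiplication can only overflow when both
; operands are non-zero, so the 'icmp' is implied by the overflow bit and the
; 'and' could in principle be simplified to just the overflow bit. The
; autogenerated CHECK lines record the current (unsimplified) output.
; @n2 through @n5 are negative tests, each breaking one requirement of the
; pattern.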
define i1 @t0_smul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_smul(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

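; Same pattern as @t0_smul, but with the operands of the 'and' swapped.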
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CMP]], [[SMUL_OV]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %cmp, %smul.ov ; swapped
  ret i1 %and
}

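; Negative test: the 'icmp' checks %size1, but the multiplication uses %size0.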
define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
; CHECK-LABEL: @n2_wrong_size(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE1:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size1, 0 ; not %size0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

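; Negative test: the 'icmp' predicate is 'eq' rather than 'ne'.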
define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

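; Negative test: the i1 values are combined with 'or' rather than 'and'.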
define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = or i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = or i1 %smul.ov, %cmp ; not 'and'
  ret i1 %and
}

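; Negative test: the comparison is against 1 rather than 0.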
define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n5_not_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 1
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 1 ; should be '0'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}