; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' < %s | FileCheck %s

target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-nvidia-cuda"

; No bypassing should be done in apparently unsuitable cases.
define void @Test_no_bypassing(i32 %a, i64 %b, ptr %retptr) {
; CHECK-LABEL: @Test_no_bypassing(
; CHECK-NEXT:    [[A_1:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[A_2:%.*]] = sub i64 -1, [[A_1]]
; CHECK-NEXT:    [[RES:%.*]] = srem i64 [[A_2]], [[B:%.*]]
; CHECK-NEXT:    store i64 [[RES]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %a.1 = zext i32 %a to i64
  ; %a.2 is always negative, so the division cannot be bypassed.
  %a.2 = sub i64 -1, %a.1
  %res = srem i64 %a.2, %b
  store i64 %res, ptr %retptr
  ret void
}

; No OR instruction is needed if one of the operands (the divisor) is known
; to fit into 32 bits.
define void @Test_check_one_operand(i64 %a, i32 %b, ptr %retptr) {
; CHECK-LABEL: @Test_check_one_operand(
; CHECK-NEXT:    [[B_1:%.*]] = zext i32 [[B:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A:%.*]], -4294967296
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]]
; CHECK:       [[TMP4:%.*]] = trunc i64 [[B_1]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[A]] to i32
; CHECK-NEXT:    [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    br label [[TMP10:%.*]]
; CHECK:       [[TMP9:%.*]] = sdiv i64 [[A]], [[B_1]]
; CHECK-NEXT:    br label [[TMP10]]
; CHECK:       [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ]
; CHECK-NEXT:    store i64 [[TMP11]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %b.1 = zext i32 %b to i64
  %res = sdiv i64 %a, %b.1
  store i64 %res, ptr %retptr
  ret void
}

; If both operands are known to fit into 32 bits, replace the division
; in place, without modifying the CFG.
define void @Test_check_none(i64 %a, i32 %b, ptr %retptr) {
; CHECK-LABEL: @Test_check_none(
; CHECK-NEXT:    [[A_1:%.*]] = and i64 [[A:%.*]], 4294967295
; CHECK-NEXT:    [[B_1:%.*]] = zext i32 [[B:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A_1]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[B_1]] to i32
; CHECK-NEXT:    [[TMP3:%.*]] = udiv i32 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
; CHECK-NEXT:    store i64 [[TMP4]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %a.1 = and i64 %a, 4294967295
  %b.1 = zext i32 %b to i64
  %res = udiv i64 %a.1, %b.1
  store i64 %res, ptr %retptr
  ret void
}

; In the case of an unsigned long division with a short dividend,
; the long division is not needed at all.
define void @Test_special_case(i32 %a, i64 %b, ptr %retptr) {
; CHECK-LABEL: @Test_special_case(
; CHECK-NEXT:    [[A_1:%.*]] = zext i32 [[A:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i64 [[A_1]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP9:%.*]]
; CHECK:       [[TMP3:%.*]] = trunc i64 [[B]] to i32
; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[A_1]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = udiv i32 [[TMP4]], [[TMP3]]
; CHECK-NEXT:    [[TMP6:%.*]] = urem i32 [[TMP4]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP5]] to i64
; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    br label [[TMP9]]
; CHECK:       [[TMP10:%.*]] = phi i64 [ [[TMP7]], [[TMP2]] ], [ 0, [[TMP0:%.*]] ]
; CHECK-NEXT:    [[TMP11:%.*]] = phi i64 [ [[TMP8]], [[TMP2]] ], [ [[A_1]], [[TMP0]] ]
; CHECK-NEXT:    [[RES:%.*]] = add i64 [[TMP10]], [[TMP11]]
; CHECK-NEXT:    store i64 [[RES]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %a.1 = zext i32 %a to i64
  %div = udiv i64 %a.1, %b
  %rem = urem i64 %a.1, %b
  %res = add i64 %div, %rem
  store i64 %res, ptr %retptr
  ret void
}

; Do not bypass a division if one of the operands looks like a hash value.
define void @Test_dont_bypass_xor(i64 %a, i64 %b, i64 %l, ptr %retptr) {
; CHECK-LABEL: @Test_dont_bypass_xor(
; CHECK-NEXT:    [[C:%.*]] = xor i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[RES:%.*]] = udiv i64 [[C]], [[L:%.*]]
; CHECK-NEXT:    store i64 [[RES]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %c = xor i64 %a, %b
  %res = udiv i64 %c, %l
  store i64 %res, ptr %retptr
  ret void
}

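; The same applies when the hash-like value reaches the division through a phi.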
define void @Test_dont_bypass_phi_xor(i64 %a, i64 %b, i64 %l, ptr %retptr) {
; CHECK-LABEL: @Test_dont_bypass_phi_xor(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[B:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP]], label [[MERGE:%.*]], label [[XORPATH:%.*]]
; CHECK:       xorpath:
; CHECK-NEXT:    [[C:%.*]] = xor i64 [[A:%.*]], [[B]]
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    [[E:%.*]] = phi i64 [ undef, [[ENTRY:%.*]] ], [ [[C]], [[XORPATH]] ]
; CHECK-NEXT:    [[RES:%.*]] = sdiv i64 [[E]], [[L:%.*]]
; CHECK-NEXT:    store i64 [[RES]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
entry:
  %cmp = icmp eq i64 %b, 0
  br i1 %cmp, label %merge, label %xorpath

xorpath:
  %c = xor i64 %a, %b
  br label %merge

merge:
  %e = phi i64 [ undef, %entry ], [ %c, %xorpath ]
  %res = sdiv i64 %e, %l
  store i64 %res, ptr %retptr
  ret void
}

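; Do not bypass when an operand is a product of a constant that does not fit
; into 32 bits.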
define void @Test_dont_bypass_mul_long_const(i64 %a, i64 %l, ptr %retptr) {
; CHECK-LABEL: @Test_dont_bypass_mul_long_const(
; CHECK-NEXT:    [[C:%.*]] = mul i64 [[A:%.*]], 5229553307
; CHECK-NEXT:    [[RES:%.*]] = urem i64 [[C]], [[L:%.*]]
; CHECK-NEXT:    store i64 [[RES]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %c = mul i64 %a, 5229553307 ; the constant doesn't fit into 32 bits
  %res = urem i64 %c, %l
  store i64 %res, ptr %retptr
  ret void
}

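; Do bypass when the dividend is a phi with at least one incoming value that
; may fit into 32 bits (the constant 42 here): a runtime check of the operands
; is emitted and the division is split into 32-bit and 64-bit paths.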
define void @Test_bypass_phi_mul_const(i64 %a, i64 %b, ptr %retptr) {
; CHECK-LABEL: @Test_bypass_phi_mul_const(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A_MUL:%.*]] = mul nsw i64 [[A:%.*]], 34806414968801
; CHECK-NEXT:    [[P:%.*]] = icmp sgt i64 [[A]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[P]], label [[BRANCH:%.*]], label [[MERGE:%.*]]
; CHECK:       branch:
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    [[LHS:%.*]] = phi i64 [ 42, [[BRANCH]] ], [ [[A_MUL]], [[ENTRY:%.*]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = or i64 [[LHS]], [[B]]
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], -4294967296
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0
; CHECK-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]]
; CHECK:       [[TMP4:%.*]] = trunc i64 [[B]] to i32
; CHECK-NEXT:    [[TMP5:%.*]] = trunc i64 [[LHS]] to i32
; CHECK-NEXT:    [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
; CHECK-NEXT:    br label [[TMP10:%.*]]
; CHECK:       [[TMP9:%.*]] = sdiv i64 [[LHS]], [[B]]
; CHECK-NEXT:    br label [[TMP10]]
; CHECK:       [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ]
; CHECK-NEXT:    store i64 [[TMP11]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
entry:
  %a.mul = mul nsw i64 %a, 34806414968801
  %p = icmp sgt i64 %a, %b
  br i1 %p, label %branch, label %merge

branch:
  br label %merge

merge:
  %lhs = phi i64 [ 42, %branch ], [ %a.mul, %entry ]
  %res = sdiv i64 %lhs, %b
  store i64 %res, ptr %retptr
  ret void
}

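; Do bypass when an operand is a product of a constant that fits into 32 bits.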
define void @Test_bypass_mul_short_const(i64 %a, i64 %l, ptr %retptr) {
; CHECK-LABEL: @Test_bypass_mul_short_const(
; CHECK-NEXT:    [[C:%.*]] = mul i64 [[A:%.*]], -42
; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[C]], [[L:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], -4294967296
; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0
; CHECK-NEXT:    br i1 [[TMP3]], label [[TMP4:%.*]], label [[TMP9:%.*]]
; CHECK:       [[TMP5:%.*]] = trunc i64 [[L]] to i32
; CHECK-NEXT:    [[TMP6:%.*]] = trunc i64 [[C]] to i32
; CHECK-NEXT:    [[TMP7:%.*]] = urem i32 [[TMP6]], [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
; CHECK-NEXT:    br label [[TMP11:%.*]]
; CHECK:       [[TMP10:%.*]] = urem i64 [[C]], [[L]]
; CHECK-NEXT:    br label [[TMP11]]
; CHECK:       [[TMP12:%.*]] = phi i64 [ [[TMP8]], [[TMP4]] ], [ [[TMP10]], [[TMP9]] ]
; CHECK-NEXT:    store i64 [[TMP12]], ptr [[RETPTR:%.*]]
; CHECK-NEXT:    ret void
;
  %c = mul i64 %a, -42
  %res = urem i64 %c, %l
  store i64 %res, ptr %retptr
  ret void
}