; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=indvars -S %s | FileCheck %s

target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
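
; Tests for computing the trip count of loops whose bound is an "ashr exact"
; (or an equivalent abs/udiv-exact/signum chain) and rewriting the exit
; condition into an equality compare against umax(bound, 1).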
define float @ashr_expansion_valid(i64 %x, ptr %ptr) {
; CHECK-LABEL: @ashr_expansion_valid(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[BOUND:%.*]] = ashr exact i64 [[X:%.*]], 4
; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[BOUND]], i64 1)
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[RED:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    [[RED_NEXT]] = fadd float [[LV]], [[RED]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[UMAX]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    [[LCSSA_RED_NEXT:%.*]] = phi float [ [[RED_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    ret float [[LCSSA_RED_NEXT]]
;
entry:
  %bound = ashr exact i64 %x, 4
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi float [ 0.0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr float, ptr %ptr, i64 %iv
  %lv = load float, ptr %gep
  %red.next = fadd float %lv, %red
  %iv.next = add nuw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %bound
  br i1 %cond, label %loop, label %exit

exit:                                             ; preds = %loop
  %lcssa.red.next = phi float [ %red.next, %loop ]
  ret float %lcssa.red.next
}

; No explicit ashr, but a chain of operations that can be replaced by ashr.
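; With %x a multiple of 16 (implied by the exact udiv), (udiv exact (abs %x), 16)
; multiplied by signum(%x) equals (ashr exact %x, 4), so the same trip count can
; be computed as in @ashr_expansion_valid.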
define float @ashr_equivalent_expansion(i64 %x, ptr %ptr) {
; CHECK-LABEL: @ashr_equivalent_expansion(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ABS_X:%.*]] = call i64 @llvm.abs.i64(i64 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[DIV:%.*]] = udiv exact i64 [[ABS_X]], 16
; CHECK-NEXT:    [[T0:%.*]] = call i64 @llvm.smax.i64(i64 [[X]], i64 -1)
; CHECK-NEXT:    [[T1:%.*]] = call i64 @llvm.smin.i64(i64 [[T0]], i64 1)
; CHECK-NEXT:    [[BOUND:%.*]] = mul nsw i64 [[DIV]], [[T1]]
; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[BOUND]], i64 1)
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[RED:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    [[RED_NEXT]] = fadd float [[LV]], [[RED]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[UMAX]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    [[LCSSA_RED_NEXT:%.*]] = phi float [ [[RED_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    ret float [[LCSSA_RED_NEXT]]
;
entry:
  %abs_x = call i64 @llvm.abs.i64(i64 %x, i1 false)
  %div = udiv exact i64 %abs_x, 16
  %t0 = call i64 @llvm.smax.i64(i64 %x, i64 -1)
  %t1 = call i64 @llvm.smin.i64(i64 %t0, i64 1)
  %bound = mul nsw i64 %div, %t1
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi float [ 0.0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr float, ptr %ptr, i64 %iv
  %lv = load float, ptr %gep
  %red.next = fadd float %lv, %red
  %iv.next = add nuw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %bound
  br i1 %cond, label %loop, label %exit

exit:                                             ; preds = %loop
  %lcssa.red.next = phi float [ %red.next, %loop ]
  ret float %lcssa.red.next
}

; Chain of operations that *cannot* be replaced by ashr, because the udiv is
; not marked as exact.
define float @no_ashr_due_to_missing_exact_udiv(i64 %x, ptr %ptr) {
; CHECK-LABEL: @no_ashr_due_to_missing_exact_udiv(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ABS_X:%.*]] = call i64 @llvm.abs.i64(i64 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[DIV:%.*]] = udiv i64 [[ABS_X]], 16
; CHECK-NEXT:    [[T0:%.*]] = call i64 @llvm.smax.i64(i64 [[X]], i64 -1)
; CHECK-NEXT:    [[T1:%.*]] = call i64 @llvm.smin.i64(i64 [[T0]], i64 1)
; CHECK-NEXT:    [[BOUND:%.*]] = mul nsw i64 [[DIV]], [[T1]]
; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[BOUND]], i64 1)
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[RED:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    [[RED_NEXT]] = fadd float [[LV]], [[RED]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[UMAX]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    [[LCSSA_RED_NEXT:%.*]] = phi float [ [[RED_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    ret float [[LCSSA_RED_NEXT]]
;
entry:
  %abs_x = call i64 @llvm.abs.i64(i64 %x, i1 false)
  %div = udiv i64 %abs_x, 16
  %t0 = call i64 @llvm.smax.i64(i64 %x, i64 -1)
  %t1 = call i64 @llvm.smin.i64(i64 %t0, i64 1)
  %bound = mul nsw i64 %div, %t1
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi float [ 0.0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr float, ptr %ptr, i64 %iv
  %lv = load float, ptr %gep
  %red.next = fadd float %lv, %red
  %iv.next = add nuw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %bound
  br i1 %cond, label %loop, label %exit

exit:                                             ; preds = %loop
  %lcssa.red.next = phi float [ %red.next, %loop ]
  ret float %lcssa.red.next
}

; Chain of operations that *cannot* be replaced by ashr, because abs and
; signum have different operands.
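; The abs operates on %x while the smax/smin signum operates on %y, so the
; product does not correspond to an ashr of a single value.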
define float @no_ashr_due_to_different_ops(i64 %x, i64 %y, ptr %ptr) {
; CHECK-LABEL: @no_ashr_due_to_different_ops(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[ABS_X:%.*]] = call i64 @llvm.abs.i64(i64 [[X:%.*]], i1 false)
; CHECK-NEXT:    [[DIV:%.*]] = udiv i64 [[ABS_X]], 16
; CHECK-NEXT:    [[T0:%.*]] = call i64 @llvm.smax.i64(i64 [[Y:%.*]], i64 -1)
; CHECK-NEXT:    [[T1:%.*]] = call i64 @llvm.smin.i64(i64 [[T0]], i64 1)
; CHECK-NEXT:    [[BOUND:%.*]] = mul nsw i64 [[DIV]], [[T1]]
; CHECK-NEXT:    [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[BOUND]], i64 1)
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       loop:
; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[RED:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr float, ptr [[PTR:%.*]], i64 [[IV]]
; CHECK-NEXT:    [[LV:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT:    [[RED_NEXT]] = fadd float [[LV]], [[RED]]
; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[UMAX]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    [[LCSSA_RED_NEXT:%.*]] = phi float [ [[RED_NEXT]], [[LOOP]] ]
; CHECK-NEXT:    ret float [[LCSSA_RED_NEXT]]
;
entry:
  %abs_x = call i64 @llvm.abs.i64(i64 %x, i1 false)
  %div = udiv i64 %abs_x, 16
  %t0 = call i64 @llvm.smax.i64(i64 %y, i64 -1)
  %t1 = call i64 @llvm.smin.i64(i64 %t0, i64 1)
  %bound = mul nsw i64 %div, %t1
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi float [ 0.0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr float, ptr %ptr, i64 %iv
  %lv = load float, ptr %gep
  %red.next = fadd float %lv, %red
  %iv.next = add nuw i64 %iv, 1
  %cond = icmp ult i64 %iv.next, %bound
  br i1 %cond, label %loop, label %exit

exit:                                             ; preds = %loop
  %lcssa.red.next = phi float [ %red.next, %loop ]
  ret float %lcssa.red.next
}

declare i64 @llvm.abs.i64(i64, i1)

declare i64 @llvm.smax.i64(i64, i64)

declare i64 @llvm.smin.i64(i64, i64)