//===- SwitchLoweringUtils.cpp - Switch Lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains switch inst lowering optimizations and utilities for
// codegen, so that it can be used for both SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace SwitchCG;

uint64_t SwitchCG::getJumpTableRange(const CaseClusterVector &Clusters,
                                     unsigned First, unsigned Last) {
  assert(Last >= First);
  const APInt &LowCase = Clusters[First].Low->getValue();
  const APInt &HighCase = Clusters[Last].High->getValue();
  assert(LowCase.getBitWidth() == HighCase.getBitWidth());

  // FIXME: A range of consecutive cases has 100% density, but only requires
  // one comparison to lower. We should discriminate against such consecutive
  // ranges in jump tables.
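  // Illustrative note (added): for clusters spanning case values 10..12 and
  // 20, the result is (20 - 10) + 1 = 11 table slots. getLimitedValue clamps
  // the subtraction, presumably so that percent-density arithmetic on the
  // returned range cannot overflow uint64_t.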
  return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
}

uint64_t
SwitchCG::getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                               unsigned First, unsigned Last) {
  assert(Last >= First);
  assert(TotalCases[Last] >= TotalCases[First]);
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  return NumCases;
}
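
// Illustrative note (added): TotalCases as used above is a running prefix
// sum: for per-cluster case counts {2, 1, 3} it holds {2, 3, 6}, so the
// clusters in [1..2] cover TotalCases[2] - TotalCases[0] = 4 individual
// cases.
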
void SwitchCG::SwitchLowering::findJumpTables(CaseClusterVector &Clusters,
                                              const SwitchInst *SI,
                                              std::optional<SDLoc> SL,
                                              MachineBasicBlock *DefaultMBB,
                                              ProfileSummaryInfo *PSI,
                                              BlockFrequencyInfo *BFI) {
#ifndef NDEBUG
  // Clusters must be non-empty, sorted, and only contain Range clusters.
  assert(!Clusters.empty());
  for (CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range);
  for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  assert(TLI && "TLI not set!");
  if (!TLI->areJTsAllowed(SI->getParent()->getParent()))
    return;

  const unsigned MinJumpTableEntries = TLI->getMinimumJumpTableEntries();
  const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;

  // Bail if not enough cases.
  const int64_t N = Clusters.size();
  if (N < 2 || N < MinJumpTableEntries)
    return;

  // Accumulated number of cases in each cluster and those prior to it.
  SmallVector<unsigned, 8> TotalCases(N);
  for (unsigned i = 0; i < N; ++i) {
    const APInt &Hi = Clusters[i].High->getValue();
    const APInt &Lo = Clusters[i].Low->getValue();
    TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
    if (i != 0)
      TotalCases[i] += TotalCases[i - 1];
  }

  uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
  uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
  assert(NumCases < UINT64_MAX / 100);
  assert(Range >= NumCases);

  // Cheap case: the whole range may be suitable for jump table.
  if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
    CaseCluster JTCluster;
    if (buildJumpTable(Clusters, 0, N - 1, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[0] = JTCluster;
      Clusters.resize(1);
      return;
    }
  }

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // Split Clusters into minimum number of dense partitions. The algorithm uses
  // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
  // for the Case Statement'" (1994), but builds the MinPartitions array in
  // reverse order to make it easier to reconstruct the partitions in ascending
  // order. In the choice between two optimal partitionings, it picks the one
  // which yields more jump tables.
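  //
  // Illustrative walk-through (added): for N = 4 single-case clusters with
  // values 0..3 on a target where the whole range is dense enough, the base
  // case sets MinPartitions[3] = 1; then for each i from 2 down to 0 the
  // inner search finds j = 3 suitable, giving MinPartitions[i] = 1 and
  // LastElement[i] = 3, so the reconstruction pass emits a single partition
  // covering all four clusters.
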
  // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);
  // PartitionsScore[i] is used to break ties when choosing between two
  // partitionings resulting in the same number of partitions.
  SmallVector<unsigned, 8> PartitionsScore(N);
  // For PartitionsScore, a small number of comparisons is considered as good
  // as a jump table and a single comparison is considered better than a jump
  // table.
  enum PartitionScores : unsigned {
    NoTable = 0,
    Table = 1,
    FewCases = 1,
    SingleCase = 2
  };
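  // Illustrative note (added): with MinJumpTableEntries = 4 (and thus
  // SmallNumberOfEntries = 2), splitting clusters into a 5-entry table plus a
  // lone case scores Table + SingleCase = 3 and wins the tie-break against an
  // equal-count split into a 4-entry table plus a 2-entry partition, which
  // scores Table + FewCases = 2.
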
  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;
  PartitionsScore[N - 1] = PartitionScores::SingleCase;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; i--) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;
    PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;

    // Search for a solution that results in fewer partitions.
    for (int64_t j = N - 1; j > i; j--) {
      // Try building a partition from Clusters[i..j].
      Range = getJumpTableRange(Clusters, i, j);
      NumCases = getJumpTableNumCases(TotalCases, i, j);
      assert(NumCases < UINT64_MAX / 100);
      assert(Range >= NumCases);

      if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
        unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
        unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
        int64_t NumEntries = j - i + 1;

        if (NumEntries == 1)
          Score += PartitionScores::SingleCase;
        else if (NumEntries <= SmallNumberOfEntries)
          Score += PartitionScores::FewCases;
        else if (NumEntries >= MinJumpTableEntries)
          Score += PartitionScores::Table;

        // If this leads to fewer partitions, or to the same number of
        // partitions with better score, it is a better partitioning.
        if (NumPartitions < MinPartitions[i] ||
            (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
          MinPartitions[i] = NumPartitions;
          LastElement[i] = j;
          PartitionsScore[i] = Score;
        }
      }
    }
  }

  // Iterate over the partitions, replacing some with jump tables in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(Last >= First);
    assert(DstIndex <= First);
    unsigned NumClusters = Last - First + 1;

    CaseCluster JTCluster;
    if (NumClusters >= MinJumpTableEntries &&
        buildJumpTable(Clusters, First, Last, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[DstIndex++] = JTCluster;
    } else {
      for (unsigned I = First; I <= Last; ++I)
        std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildJumpTable(const CaseClusterVector &Clusters,
                                              unsigned First, unsigned Last,
                                              const SwitchInst *SI,
                                              const std::optional<SDLoc> &SL,
                                              MachineBasicBlock *DefaultMBB,
                                              CaseCluster &JTCluster) {
  assert(First <= Last);

  auto Prob = BranchProbability::getZero();
  unsigned NumCmps = 0;
  std::vector<MachineBasicBlock *> Table;
  DenseMap<MachineBasicBlock *, BranchProbability> JTProbs;

  // Initialize probabilities in JTProbs.
  for (unsigned I = First; I <= Last; ++I)
    JTProbs[Clusters[I].MBB] = BranchProbability::getZero();

  for (unsigned I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Prob += Clusters[I].Prob;
    const APInt &Low = Clusters[I].Low->getValue();
    const APInt &High = Clusters[I].High->getValue();
    NumCmps += (Low == High) ? 1 : 2;
    if (I != First) {
      // Fill the gap between this and the previous cluster.
      const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
      assert(PreviousHigh.slt(Low));
      uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
      for (uint64_t J = 0; J < Gap; J++)
        Table.push_back(DefaultMBB);
    }
    uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
    for (uint64_t J = 0; J < ClusterSize; ++J)
      Table.push_back(Clusters[I].MBB);
    JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
  }
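  // Illustrative note (added): for clusters {0..1 -> A} and {4 -> B} with
  // default block D, the loop above produces Table = [A, A, D, D, B]: two
  // slots for the first cluster, two gap slots for the uncovered values 2
  // and 3, and one slot for the second cluster.
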
  unsigned NumDests = JTProbs.size();
  if (TLI->isSuitableForBitTests(NumDests, NumCmps,
                                 Clusters[First].Low->getValue(),
                                 Clusters[Last].High->getValue(), *DL)) {
    // Clusters[First..Last] should be lowered as bit tests instead.
    return false;
  }

  // Create the MBB that will load from and jump through the table.
  // Note: We create it here, but it's not inserted into the function yet.
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *JumpTableMBB =
      CurMF->CreateMachineBasicBlock(SI->getParent());

  // Add successors. Note: use table order for determinism.
  SmallPtrSet<MachineBasicBlock *, 8> Done;
  for (MachineBasicBlock *Succ : Table) {
    if (Done.count(Succ))
      continue;
    addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
    Done.insert(Succ);
  }
  JumpTableMBB->normalizeSuccProbs();

  unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI->getJumpTableEncoding())
                     ->createJumpTableIndex(Table);

  // Set up the jump table info.
  JumpTable JT(-1U, JTI, JumpTableMBB, nullptr, SL);
  JumpTableHeader JTH(Clusters[First].Low->getValue(),
                      Clusters[Last].High->getValue(), SI->getCondition(),
                      nullptr, false);
  JTCases.emplace_back(std::move(JTH), std::move(JT));

  JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
                                     JTCases.size() - 1, Prob);
  return true;
}

void SwitchCG::SwitchLowering::findBitTestClusters(CaseClusterVector &Clusters,
                                                   const SwitchInst *SI) {
  // Partition Clusters into as few subsets as possible, where each subset has a
  // range that fits in a machine word and has <= 3 unique destinations.

#ifndef NDEBUG
  // Clusters must be sorted and contain Range or JumpTable clusters.
  assert(!Clusters.empty());
  assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
  for (const CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
  for (unsigned i = 1; i < Clusters.size(); ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // If target does not have legal shift left, do not emit bit tests at all.
  EVT PTy = TLI->getPointerTy(*DL);
  if (!TLI->isOperationLegal(ISD::SHL, PTy))
    return;

  int BitWidth = PTy.getSizeInBits();
  const int64_t N = Clusters.size();

  // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);

  // FIXME: This might not be the best algorithm for finding bit test clusters.

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; --i) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;

    // Search for a solution that results in fewer partitions.
    // Note: the search is limited by BitWidth, reducing time complexity.
    for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
      // Try building a partition from Clusters[i..j].

      // Check the range.
      if (!TLI->rangeFitsInWord(Clusters[i].Low->getValue(),
                                Clusters[j].High->getValue(), *DL))
        continue;

      // Check nbr of destinations and cluster types.
      // FIXME: This works, but doesn't seem very efficient.
      bool RangesOnly = true;
      BitVector Dests(FuncInfo.MF->getNumBlockIDs());
      for (int64_t k = i; k <= j; k++) {
        if (Clusters[k].Kind != CC_Range) {
          RangesOnly = false;
          break;
        }
        Dests.set(Clusters[k].MBB->getNumber());
      }
      if (!RangesOnly || Dests.count() > 3)
        break;

      // Check if it's a better partition.
      unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
      if (NumPartitions < MinPartitions[i]) {
        // Found a better partition.
        MinPartitions[i] = NumPartitions;
        LastElement[i] = j;
      }
    }
  }

  // Iterate over the partitions, replacing with bit-test clusters in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(First <= Last);
    assert(DstIndex <= First);

    CaseCluster BitTestCluster;
    if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
      Clusters[DstIndex++] = BitTestCluster;
    } else {
      size_t NumClusters = Last - First + 1;
      std::memmove(&Clusters[DstIndex], &Clusters[First],
                   sizeof(Clusters[0]) * NumClusters);
      DstIndex += NumClusters;
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
                                             unsigned First, unsigned Last,
                                             const SwitchInst *SI,
                                             CaseCluster &BTCluster) {
  assert(First <= Last);
  if (First == Last)
    return false;

  BitVector Dests(FuncInfo.MF->getNumBlockIDs());
  unsigned NumCmps = 0;
  for (int64_t I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Dests.set(Clusters[I].MBB->getNumber());
    NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
  }
  unsigned NumDests = Dests.count();

  APInt Low = Clusters[First].Low->getValue();
  APInt High = Clusters[Last].High->getValue();
  assert(Low.slt(High));

  if (!TLI->isSuitableForBitTests(NumDests, NumCmps, Low, High, *DL))
    return false;

  APInt LowBound;
  APInt CmpRange;

  const int BitWidth = TLI->getPointerTy(*DL).getSizeInBits();
  assert(TLI->rangeFitsInWord(Low, High, *DL) &&
         "Case range must fit in bit mask!");

  // Check if the clusters cover a contiguous range such that no value in the
  // range will jump to the default statement.
  bool ContiguousRange = true;
  for (int64_t I = First + 1; I <= Last; ++I) {
    if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
      ContiguousRange = false;
      break;
    }
  }

  if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word without having
    // to subtract minValue. In this case, we can optimize away the subtraction.
    LowBound = APInt::getZero(Low.getBitWidth());
    CmpRange = High;
    ContiguousRange = false;
  } else {
    LowBound = Low;
    CmpRange = High - Low;
  }
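  // Illustrative note (added): for cases {1, 3} on a 64-bit target, Low = 1
  // is strictly positive and High = 3 slt 64, so LowBound becomes 0 and
  // CmpRange becomes 3; since [0..3] now also contains values below Low that
  // must reach the default block, ContiguousRange is forced to false.
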
  CaseBitsVector CBV;
  auto TotalProb = BranchProbability::getZero();
  for (unsigned i = First; i <= Last; ++i) {
    // Find the CaseBits for this destination.
    unsigned j;
    for (j = 0; j < CBV.size(); ++j)
      if (CBV[j].BB == Clusters[i].MBB)
        break;
    if (j == CBV.size())
      CBV.push_back(
          CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
    CaseBits *CB = &CBV[j];

    // Update Mask, Bits and ExtraProb.
    uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
    uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
    assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
    CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
    CB->Bits += Hi - Lo + 1;
    CB->ExtraProb += Clusters[i].Prob;
    TotalProb += Clusters[i].Prob;
  }
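  // Illustrative note (added): with LowBound already subtracted, a cluster
  // covering values 2..4 gives Lo = 2 and Hi = 4, so the mask update above
  // ORs in (-1ULL >> 61) << 2 == 0b11100, setting mask bits 2 through 4.
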
  BitTestInfo BTI;
  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
    // Sort by probability first, number of bits second, bit mask third.
    if (a.ExtraProb != b.ExtraProb)
      return a.ExtraProb > b.ExtraProb;
    if (a.Bits != b.Bits)
      return a.Bits > b.Bits;
    return a.Mask < b.Mask;
  });

  for (auto &CB : CBV) {
    MachineBasicBlock *BitTestBB =
        FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
    BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
  }
  BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
                            SI->getCondition(), -1U, MVT::Other, false,
                            ContiguousRange, nullptr, nullptr, std::move(BTI),
                            TotalProb);

  BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
                                    BitTestCases.size() - 1, TotalProb);
  return true;
}

void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
  for (const CaseCluster &CC : Clusters)
    assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif

  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
    return a.Low->getValue().slt(b.Low->getValue());
  });

  // Merge adjacent clusters with the same destination.
  const unsigned N = Clusters.size();
  unsigned DstIndex = 0;
  for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
    CaseCluster &CC = Clusters[SrcIndex];
    const ConstantInt *CaseVal = CC.Low;
    MachineBasicBlock *Succ = CC.MBB;

    if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
        (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
      // If this case has the same successor and is a neighbour, merge it into
      // the previous cluster.
      Clusters[DstIndex - 1].High = CaseVal;
      Clusters[DstIndex - 1].Prob += CC.Prob;
    } else {
      std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
                   sizeof(Clusters[SrcIndex]));
    }
  }
  Clusters.resize(DstIndex);
}
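
// Illustrative note (added): given single-case input clusters {1 -> A},
// {2 -> A}, {4 -> A}, sortAndRangeify merges the first two into {1..2 -> A}
// but keeps {4 -> A} separate, since the gap at 3 breaks adjacency even
// though the destination matches.
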
unsigned SwitchCG::SwitchLowering::caseClusterRank(const CaseCluster &CC,
                                                   CaseClusterIt First,
                                                   CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}
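
// Illustrative note (added): the returned rank counts how many clusters in
// [First, Last] sort ahead of CC, i.e. have strictly higher probability or
// tie on probability with a smaller case value; a rank of 0 means CC would
// come first.
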
llvm::SwitchCG::SwitchLowering::SplitWorkItemInfo
SwitchCG::SwitchLowering::computeSplitWorkItemInfo(
    const SwitchWorkListItem &W) {
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions
  // to find a partitioning of the clusters which balances the probability on
  // both sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }
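  // Illustrative example (added): for four clusters with probabilities
  // {4, 1, 1, 4} and zero default probability, the loop alternates once to
  // the right and once to the left, leaving {4, 1} on the left and {1, 4} on
  // the right with probability 5 on each side.
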
  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.
    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has less than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.
      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  return SplitWorkItemInfo{LastLeft, FirstRight, LeftProb, RightProb};
}