//===- SwitchLoweringUtils.cpp - Switch Lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains switch inst lowering optimizations and utilities for
// codegen, so that it can be used for both SelectionDAG and GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
using namespace SwitchCG;

uint64_t SwitchCG::getJumpTableRange(const CaseClusterVector &Clusters,
                                     unsigned First, unsigned Last) {
  assert(Last >= First);
  const APInt &LowCase = Clusters[First].Low->getValue();
  const APInt &HighCase = Clusters[Last].High->getValue();
  assert(LowCase.getBitWidth() == HighCase.getBitWidth());
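
  // Illustrative note: for clusters covering [0, 2] and [5, 7], LowCase = 0
  // and HighCase = 7, so the function returns 7 - 0 + 1 = 8.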
  // FIXME: A range of consecutive cases has 100% density, but only requires one
  // comparison to lower. We should discriminate against such consecutive ranges
  // in jump tables.
  return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
}

uint64_t
SwitchCG::getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                               unsigned First, unsigned Last) {
  assert(Last >= First);
  assert(TotalCases[Last] >= TotalCases[First]);
  uint64_t NumCases =
      TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
  return NumCases;
}

void SwitchCG::SwitchLowering::findJumpTables(CaseClusterVector &Clusters,
                                              const SwitchInst *SI,
                                              std::optional<SDLoc> SL,
                                              MachineBasicBlock *DefaultMBB,
                                              ProfileSummaryInfo *PSI,
                                              BlockFrequencyInfo *BFI) {
#ifndef NDEBUG
  // Clusters must be non-empty, sorted, and only contain Range clusters.
  assert(!Clusters.empty());
  for (CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range);
  for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  assert(TLI && "TLI not set!");
  if (!TLI->areJTsAllowed(SI->getParent()->getParent()))
    return;

  const unsigned MinJumpTableEntries = TLI->getMinimumJumpTableEntries();
  const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;

  // Bail if not enough cases.
  const int64_t N = Clusters.size();
  if (N < 2 || N < MinJumpTableEntries)
    return;

  // Accumulated number of cases in each cluster and those prior to it.
  SmallVector<unsigned, 8> TotalCases(N);
  for (unsigned i = 0; i < N; ++i) {
    const APInt &Hi = Clusters[i].High->getValue();
    const APInt &Lo = Clusters[i].Low->getValue();
    TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
    if (i != 0)
      TotalCases[i] += TotalCases[i - 1];
  }
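
  // Illustrative note: for clusters [0, 2], [3, 3] and [6, 7], the per-cluster
  // case counts are 3, 1 and 2, so TotalCases accumulates to {3, 4, 6}.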

  uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
  uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
  assert(NumCases < UINT64_MAX / 100);
  assert(Range >= NumCases);

  // Cheap case: the whole range may be suitable for jump table.
  if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
    CaseCluster JTCluster;
    if (buildJumpTable(Clusters, 0, N - 1, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[0] = JTCluster;
      Clusters.resize(1);
      return;
    }
  }

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // Split Clusters into minimum number of dense partitions. The algorithm uses
  // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
  // for the Case Statement'" (1994), but builds the MinPartitions array in
  // reverse order to make it easier to reconstruct the partitions in ascending
  // order. In the choice between two optimal partitionings, it picks the one
  // which yields more jump tables. The algorithm is described in
  // https://arxiv.org/pdf/1910.02351v2
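  //
  // Illustrative note: if only Clusters[1..3] are dense enough for a jump
  // table, the optimal partitioning is {Clusters[0]}, {Clusters[1..3]},
  // recorded as MinPartitions[0] = 2, LastElement[0] = 0, LastElement[1] = 3.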

  // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);
  // PartitionsScore[i] is used to break ties when choosing between two
  // partitionings resulting in the same number of partitions.
  SmallVector<unsigned, 8> PartitionsScore(N);
  // For PartitionsScore, a small number of comparisons is considered as good as
  // a jump table and a single comparison is considered better than a jump
  // table.
  enum PartitionScores : unsigned {
    NoTable = 0,
    Table = 1,
    FewCases = 1,
    SingleCase = 2
  };
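
  // Illustrative note: for two partitionings of equal size, {table} + {single
  // case} scores 1 + 2 = 3 and beats {table} + {few cases} at 1 + 1 = 2.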

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;
  PartitionsScore[N - 1] = PartitionScores::SingleCase;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; i--) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;
    PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;

    // Search for a solution that results in fewer partitions.
    for (int64_t j = N - 1; j > i; j--) {
      // Try building a partition from Clusters[i..j].
      Range = getJumpTableRange(Clusters, i, j);
      NumCases = getJumpTableNumCases(TotalCases, i, j);
      assert(NumCases < UINT64_MAX / 100);
      assert(Range >= NumCases);

      if (TLI->isSuitableForJumpTable(SI, NumCases, Range, PSI, BFI)) {
        unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
        unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
        int64_t NumEntries = j - i + 1;

        if (NumEntries == 1)
          Score += PartitionScores::SingleCase;
        else if (NumEntries <= SmallNumberOfEntries)
          Score += PartitionScores::FewCases;
        else if (NumEntries >= MinJumpTableEntries)
          Score += PartitionScores::Table;

        // If this leads to fewer partitions, or to the same number of
        // partitions with better score, it is a better partitioning.
        if (NumPartitions < MinPartitions[i] ||
            (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
          MinPartitions[i] = NumPartitions;
          LastElement[i] = j;
          PartitionsScore[i] = Score;
        }
      }
    }
  }

  // Iterate over the partitions, replacing some with jump tables in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(Last >= First);
    assert(DstIndex <= First);
    unsigned NumClusters = Last - First + 1;

    CaseCluster JTCluster;
    if (NumClusters >= MinJumpTableEntries &&
        buildJumpTable(Clusters, First, Last, SI, SL, DefaultMBB, JTCluster)) {
      Clusters[DstIndex++] = JTCluster;
    } else {
      for (unsigned I = First; I <= Last; ++I)
        std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildJumpTable(const CaseClusterVector &Clusters,
                                              unsigned First, unsigned Last,
                                              const SwitchInst *SI,
                                              const std::optional<SDLoc> &SL,
                                              MachineBasicBlock *DefaultMBB,
                                              CaseCluster &JTCluster) {
  assert(First <= Last);

  auto Prob = BranchProbability::getZero();
  unsigned NumCmps = 0;
  std::vector<MachineBasicBlock *> Table;
  DenseMap<MachineBasicBlock *, BranchProbability> JTProbs;

  // Initialize probabilities in JTProbs.
  for (unsigned I = First; I <= Last; ++I)
    JTProbs[Clusters[I].MBB] = BranchProbability::getZero();

  for (unsigned I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Prob += Clusters[I].Prob;
    const APInt &Low = Clusters[I].Low->getValue();
    const APInt &High = Clusters[I].High->getValue();
    NumCmps += (Low == High) ? 1 : 2;
    if (I != First) {
      // Fill the gap between this and the previous cluster.
      const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
      assert(PreviousHigh.slt(Low));
      uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
      for (uint64_t J = 0; J < Gap; J++)
        Table.push_back(DefaultMBB);
    }
    uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
    for (uint64_t J = 0; J < ClusterSize; ++J)
      Table.push_back(Clusters[I].MBB);
    JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
  }
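
  // Illustrative note: clusters [1, 2] -> A and [5, 5] -> B produce the table
  // {A, A, Default, Default, B}; the two middle entries fill the uncovered
  // gap 3..4 with DefaultMBB.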

  unsigned NumDests = JTProbs.size();
  if (TLI->isSuitableForBitTests(NumDests, NumCmps,
                                 Clusters[First].Low->getValue(),
                                 Clusters[Last].High->getValue(), *DL)) {
    // Clusters[First..Last] should be lowered as bit tests instead.
    return false;
  }

  // Create the MBB that will load from and jump through the table.
  // Note: We create it here, but it's not inserted into the function yet.
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *JumpTableMBB =
      CurMF->CreateMachineBasicBlock(SI->getParent());

  // Add successors. Note: use table order for determinism.
  SmallPtrSet<MachineBasicBlock *, 8> Done;
  for (MachineBasicBlock *Succ : Table) {
    if (Done.count(Succ))
      continue;
    addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
    Done.insert(Succ);
  }
  JumpTableMBB->normalizeSuccProbs();

  unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI->getJumpTableEncoding())
                     ->createJumpTableIndex(Table);

  // Set up the jump table info.
  JumpTable JT(-1U, JTI, JumpTableMBB, nullptr, SL);
  JumpTableHeader JTH(Clusters[First].Low->getValue(),
                      Clusters[Last].High->getValue(), SI->getCondition(),
                      nullptr, false);
  JTCases.emplace_back(std::move(JTH), std::move(JT));

  JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
                                     JTCases.size() - 1, Prob);
  return true;
}

void SwitchCG::SwitchLowering::findBitTestClusters(CaseClusterVector &Clusters,
                                                   const SwitchInst *SI) {
  // Partition Clusters into as few subsets as possible, where each subset has a
  // range that fits in a machine word and has <= 3 unique destinations.
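  //
  // Illustrative note: on a 64-bit target, clusters for the cases {0, 3, 8}
  // targeting at most three distinct blocks can land in a single partition,
  // since the span 0..8 fits in one machine word.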

#ifndef NDEBUG
  // Clusters must be sorted and contain Range or JumpTable clusters.
  assert(!Clusters.empty());
  assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
  for (const CaseCluster &C : Clusters)
    assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
  for (unsigned i = 1; i < Clusters.size(); ++i)
    assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
#endif

  // The algorithm below is not suitable for -O0.
  if (TM->getOptLevel() == CodeGenOptLevel::None)
    return;

  // If target does not have legal shift left, do not emit bit tests at all.
  EVT PTy = TLI->getPointerTy(*DL);
  if (!TLI->isOperationLegal(ISD::SHL, PTy))
    return;

  int BitWidth = PTy.getSizeInBits();
  const int64_t N = Clusters.size();

  // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
  SmallVector<unsigned, 8> MinPartitions(N);
  // LastElement[i] is the last element of the partition starting at i.
  SmallVector<unsigned, 8> LastElement(N);

  // FIXME: This might not be the best algorithm for finding bit test clusters.

  // Base case: There is only one way to partition Clusters[N-1].
  MinPartitions[N - 1] = 1;
  LastElement[N - 1] = N - 1;

  // Note: loop indexes are signed to avoid underflow.
  for (int64_t i = N - 2; i >= 0; --i) {
    // Find optimal partitioning of Clusters[i..N-1].
    // Baseline: Put Clusters[i] into a partition on its own.
    MinPartitions[i] = MinPartitions[i + 1] + 1;
    LastElement[i] = i;

    // Search for a solution that results in fewer partitions.
    // Note: the search is limited by BitWidth, reducing time complexity.
    for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
      // Try building a partition from Clusters[i..j].

      // Check the range.
      if (!TLI->rangeFitsInWord(Clusters[i].Low->getValue(),
                                Clusters[j].High->getValue(), *DL))
        continue;

      // Check nbr of destinations and cluster types.
      // FIXME: This works, but doesn't seem very efficient.
      bool RangesOnly = true;
      BitVector Dests(FuncInfo.MF->getNumBlockIDs());
      for (int64_t k = i; k <= j; k++) {
        if (Clusters[k].Kind != CC_Range) {
          RangesOnly = false;
          break;
        }
        Dests.set(Clusters[k].MBB->getNumber());
      }
      if (!RangesOnly || Dests.count() > 3)
        break;

      // Check if it's a better partition.
      unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
      if (NumPartitions < MinPartitions[i]) {
        // Found a better partition.
        MinPartitions[i] = NumPartitions;
        LastElement[i] = j;
      }
    }
  }

  // Iterate over the partitions, replacing with bit-test clusters in-place.
  unsigned DstIndex = 0;
  for (unsigned First = 0, Last; First < N; First = Last + 1) {
    Last = LastElement[First];
    assert(First <= Last);
    assert(DstIndex <= First);

    CaseCluster BitTestCluster;
    if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
      Clusters[DstIndex++] = BitTestCluster;
    } else {
      size_t NumClusters = Last - First + 1;
      std::memmove(&Clusters[DstIndex], &Clusters[First],
                   sizeof(Clusters[0]) * NumClusters);
      DstIndex += NumClusters;
    }
  }
  Clusters.resize(DstIndex);
}

bool SwitchCG::SwitchLowering::buildBitTests(CaseClusterVector &Clusters,
                                             unsigned First, unsigned Last,
                                             const SwitchInst *SI,
                                             CaseCluster &BTCluster) {
  assert(First <= Last);
  if (First == Last)
    return false;

  BitVector Dests(FuncInfo.MF->getNumBlockIDs());
  unsigned NumCmps = 0;
  for (int64_t I = First; I <= Last; ++I) {
    assert(Clusters[I].Kind == CC_Range);
    Dests.set(Clusters[I].MBB->getNumber());
    NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
  }
  unsigned NumDests = Dests.count();

  APInt Low = Clusters[First].Low->getValue();
  APInt High = Clusters[Last].High->getValue();
  assert(Low.slt(High));

  if (!TLI->isSuitableForBitTests(NumDests, NumCmps, Low, High, *DL))
    return false;

  APInt LowBound;
  APInt CmpRange;

  const int BitWidth = TLI->getPointerTy(*DL).getSizeInBits();
  assert(TLI->rangeFitsInWord(Low, High, *DL) &&
         "Case range must fit in bit mask!");

  // Check if the clusters cover a contiguous range such that no value in the
  // range will jump to the default statement.
  bool ContiguousRange = true;
  for (int64_t I = First + 1; I <= Last; ++I) {
    if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
      ContiguousRange = false;
      break;
    }
  }

  if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
    // Optimize the case where all the case values fit in a word without having
    // to subtract minValue. In this case, we can optimize away the subtraction.
    LowBound = APInt::getZero(Low.getBitWidth());
    CmpRange = High;
    ContiguousRange = false;
  } else {
    LowBound = Low;
    CmpRange = High - Low;
  }

  CaseBitsVector CBV;
  auto TotalProb = BranchProbability::getZero();
  for (unsigned i = First; i <= Last; ++i) {
    // Find the CaseBits for this destination.
    unsigned j;
    for (j = 0; j < CBV.size(); ++j)
      if (CBV[j].BB == Clusters[i].MBB)
        break;
    if (j == CBV.size())
      CBV.push_back(
          CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
    CaseBits *CB = &CBV[j];

    // Update Mask, Bits and ExtraProb.
    uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
    uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
    assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
    CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
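    // Illustrative note: for Lo = 2 and Hi = 4, -1ULL >> (63 - 2) yields the
    // three-bit mask 0b111, shifted left by Lo to set bits 2..4 (0b11100).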
    CB->Bits += Hi - Lo + 1;
    CB->ExtraProb += Clusters[i].Prob;
    TotalProb += Clusters[i].Prob;
  }

  BitTestInfo BTI;
  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
    // Sort by probability first, number of bits second, bit mask third.
    if (a.ExtraProb != b.ExtraProb)
      return a.ExtraProb > b.ExtraProb;
    if (a.Bits != b.Bits)
      return a.Bits > b.Bits;
    return a.Mask < b.Mask;
  });

  for (auto &CB : CBV) {
    MachineBasicBlock *BitTestBB =
        FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
    BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
  }
  BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
                            SI->getCondition(), -1U, MVT::Other, false,
                            ContiguousRange, nullptr, nullptr, std::move(BTI),
                            TotalProb);

  BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
                                    BitTestCases.size() - 1, TotalProb);
  return true;
}

void SwitchCG::sortAndRangeify(CaseClusterVector &Clusters) {
#ifndef NDEBUG
  for (const CaseCluster &CC : Clusters)
    assert(CC.Low == CC.High && "Input clusters must be single-case");
#endif

  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
    return a.Low->getValue().slt(b.Low->getValue());
  });

  // Merge adjacent clusters with the same destination.
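  // Illustrative note: single-case clusters 0 -> A, 1 -> A, 2 -> A, 5 -> B
  // merge into the range clusters [0, 2] -> A and [5, 5] -> B.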
  const unsigned N = Clusters.size();
  unsigned DstIndex = 0;
  for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
    CaseCluster &CC = Clusters[SrcIndex];
    const ConstantInt *CaseVal = CC.Low;
    MachineBasicBlock *Succ = CC.MBB;

    if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
        (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
      // If this case has the same successor and is a neighbour, merge it into
      // the previous cluster.
      Clusters[DstIndex - 1].High = CaseVal;
      Clusters[DstIndex - 1].Prob += CC.Prob;
    } else {
      std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
                   sizeof(Clusters[SrcIndex]));
    }
  }
  Clusters.resize(DstIndex);
}

unsigned SwitchCG::SwitchLowering::caseClusterRank(const CaseCluster &CC,
                                                   CaseClusterIt First,
                                                   CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}

llvm::SwitchCG::SwitchLowering::SplitWorkItemInfo
SwitchCG::SwitchLowering::computeSplitWorkItemInfo(
    const SwitchWorkListItem &W) {
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions to
  // find a partitioning of the clusters which balances the probability on both
  // sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
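  //
  // Illustrative note: for cluster probabilities {0.3, 0.3, 0.2, 0.2}, the
  // loop below settles on the split {0.3, 0.3} | {0.2, 0.2}, ending with
  // LeftProb = 0.6 + DefaultProb / 2 and RightProb = 0.4 + DefaultProb / 2.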
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has less than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  return SplitWorkItemInfo{LastLeft, FirstRight, LeftProb, RightProb};
}