1 //===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that splits the constant pool up into 'islands'
10 // which are scattered throughout the function. This is required due to the
11 // limited pc-relative displacements that ARM has.
13 //===----------------------------------------------------------------------===//
15 #include "ARM.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBasicBlockInfo.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMBaseInfo.h"
21 #include "Thumb2InstrInfo.h"
22 #include "Utils/ARMBaseInfo.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/ADT/StringRef.h"
29 #include "llvm/CodeGen/LivePhysRegs.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineFunctionPass.h"
35 #include "llvm/CodeGen/MachineInstr.h"
36 #include "llvm/CodeGen/MachineJumpTableInfo.h"
37 #include "llvm/CodeGen/MachineOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/Config/llvm-config.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugLoc.h"
42 #include "llvm/MC/MCInstrDesc.h"
43 #include "llvm/Pass.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/Format.h"
49 #include "llvm/Support/MathExtras.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <algorithm>
52 #include <cassert>
53 #include <cstdint>
54 #include <iterator>
55 #include <utility>
56 #include <vector>
58 using namespace llvm;
60 #define DEBUG_TYPE "arm-cp-islands"
62 #define ARM_CP_ISLANDS_OPT_NAME \
63 "ARM constant island placement and branch shortening pass"
64 STATISTIC(NumCPEs, "Number of constpool entries");
65 STATISTIC(NumSplit, "Number of uncond branches inserted");
66 STATISTIC(NumCBrFixed, "Number of cond branches fixed");
67 STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
68 STATISTIC(NumTBs, "Number of table branches generated");
69 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
70 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
71 STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
72 STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
73 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
74 STATISTIC(NumLEInserted, "Number of LE backwards branches inserted");
76 static cl::opt<bool>
77 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
78 cl::desc("Adjust basic block layout to better use TB[BH]"));
80 static cl::opt<unsigned>
81 CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
82 cl::desc("The max number of iteration for converge"));
84 static cl::opt<bool> SynthesizeThumb1TBB(
85 "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
86 cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
87 "equivalent to the TBB/TBH instructions"));
89 namespace {
91 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
92 /// requires constant pool entries to be scattered among the instructions
93 /// inside a function. To do this, it completely ignores the normal LLVM
94 /// constant pool; instead, it places constants wherever it feels like with
95 /// special instructions.
96 ///
97 /// The terminology used in this pass includes:
98 /// Islands - Clumps of constants placed in the function.
99 /// Water - Potential places where an island could be formed.
100 /// CPE - A constant pool entry that has been placed somewhere, which
101 /// tracks a list of users.
102 class ARMConstantIslands : public MachineFunctionPass {
103 std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;
105 /// WaterList - A sorted list of basic blocks where islands could be placed
106 /// (i.e. blocks that don't fall through to the following block, due
107 /// to a return, unreachable, or unconditional branch).
108 std::vector<MachineBasicBlock*> WaterList;
110 /// NewWaterList - The subset of WaterList that was created since the
111 /// previous iteration by inserting unconditional branches.
112 SmallSet<MachineBasicBlock*, 4> NewWaterList;
114 using water_iterator = std::vector<MachineBasicBlock *>::iterator;
116 /// CPUser - One user of a constant pool, keeping the machine instruction
117 /// pointer, the constant pool being referenced, and the max displacement
118 /// allowed from the instruction to the CP. The HighWaterMark records the
119 /// highest basic block where a new CPEntry can be placed. To ensure this
120 /// pass terminates, the CP entries are initially placed at the end of the
121 /// function and then move monotonically to lower addresses. The
122 /// exception to this rule is when the current CP entry for a particular
123 /// CPUser is out of range, but there is another CP entry for the same
124 /// constant value in range. We want to use the existing in-range CP
125 /// entry, but if it later moves out of range, the search for new water
126 /// should resume where it left off. The HighWaterMark is used to record
127 /// that point.
128 struct CPUser {
129 MachineInstr *MI;
130 MachineInstr *CPEMI;
131 MachineBasicBlock *HighWaterMark;
132 unsigned MaxDisp;
133 bool NegOk;
134 bool IsSoImm;
135 bool KnownAlignment = false;
137 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
138 bool neg, bool soimm)
139 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
140 HighWaterMark = CPEMI->getParent();
143 /// getMaxDisp - Returns the maximum displacement supported by MI.
144 /// Correct for unknown alignment.
145 /// Conservatively subtract 2 bytes to handle weird alignment effects.
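/// For example, a tLDRpci user with MaxDisp = 255 * 4 = 1020 bytes yields
/// getMaxDisp() == 1018 when its mod-4 alignment is known and 1016 when it
/// is not: 2 bytes of safety margin, plus 2 more for the unknown alignment.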
146 unsigned getMaxDisp() const {
147 return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
151 /// CPUsers - Keep track of all of the machine instructions that use various
152 /// constant pools and their max displacement.
153 std::vector<CPUser> CPUsers;
155 /// CPEntry - One per constant pool entry, keeping the machine instruction
156 /// pointer, the constpool index, and the number of CPUser's which
157 /// reference this entry.
158 struct CPEntry {
159 MachineInstr *CPEMI;
160 unsigned CPI;
161 unsigned RefCount;
163 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
164 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
167 /// CPEntries - Keep track of all of the constant pool entry machine
168 /// instructions. For each original constpool index (i.e. those that existed
169 /// upon entry to this pass), it keeps a vector of entries. Original
170 /// elements are cloned as we go along; the clones are put in the vector of
171 /// the original element, but have distinct CPIs.
173 /// The first half of CPEntries contains generic constants, the second half
174 /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
175 /// which vector it will be in here.
176 std::vector<std::vector<CPEntry>> CPEntries;
178 /// Maps a JT index to the offset in CPEntries containing copies of that
179 /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
180 DenseMap<int, int> JumpTableEntryIndices;
182 /// Maps a JT index to the LEA that actually uses the index to calculate its
183 /// base address.
184 DenseMap<int, int> JumpTableUserIndices;
186 /// ImmBranch - One per immediate branch, keeping the machine instruction
187 /// pointer, conditional or unconditional, the max displacement,
188 /// and (if isCond is true) the corresponding unconditional branch
189 /// opcode.
190 struct ImmBranch {
191 MachineInstr *MI;
192 unsigned MaxDisp : 31;
193 bool isCond : 1;
194 unsigned UncondBr;
196 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
197 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
200 /// ImmBranches - Keep track of all the immediate branch instructions.
201 std::vector<ImmBranch> ImmBranches;
203 /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
204 SmallVector<MachineInstr*, 4> PushPopMIs;
206 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
207 SmallVector<MachineInstr*, 4> T2JumpTables;
209 MachineFunction *MF;
210 MachineConstantPool *MCP;
211 const ARMBaseInstrInfo *TII;
212 const ARMSubtarget *STI;
213 ARMFunctionInfo *AFI;
214 MachineDominatorTree *DT = nullptr;
215 bool isThumb;
216 bool isThumb1;
217 bool isThumb2;
218 bool isPositionIndependentOrROPI;
220 public:
221 static char ID;
223 ARMConstantIslands() : MachineFunctionPass(ID) {}
225 bool runOnMachineFunction(MachineFunction &MF) override;
227 void getAnalysisUsage(AnalysisUsage &AU) const override {
228 AU.addRequired<MachineDominatorTree>();
229 MachineFunctionPass::getAnalysisUsage(AU);
232 MachineFunctionProperties getRequiredProperties() const override {
233 return MachineFunctionProperties().set(
234 MachineFunctionProperties::Property::NoVRegs);
237 StringRef getPassName() const override {
238 return ARM_CP_ISLANDS_OPT_NAME;
241 private:
242 void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
243 void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
244 bool BBHasFallthrough(MachineBasicBlock *MBB);
245 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
246 Align getCPEAlign(const MachineInstr *CPEMI);
247 void scanFunctionJumpTables();
248 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
249 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
250 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
251 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
252 unsigned getCombinedIndex(const MachineInstr *CPEMI);
253 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
254 bool findAvailableWater(CPUser&U, unsigned UserOffset,
255 water_iterator &WaterIter, bool CloserWater);
256 void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
257 MachineBasicBlock *&NewMBB);
258 bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
259 void removeDeadCPEMI(MachineInstr *CPEMI);
260 bool removeUnusedCPEntries();
261 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
262 MachineInstr *CPEMI, unsigned Disp, bool NegOk,
263 bool DoDump = false);
264 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
265 CPUser &U, unsigned &Growth);
266 bool fixupImmediateBr(ImmBranch &Br);
267 bool fixupConditionalBr(ImmBranch &Br);
268 bool fixupUnconditionalBr(ImmBranch &Br);
269 bool optimizeThumb2Instructions();
270 bool optimizeThumb2Branches();
271 bool reorderThumb2JumpTables();
272 bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
273 unsigned &DeadSize, bool &CanDeleteLEA,
274 bool &BaseRegKill);
275 bool optimizeThumb2JumpTables();
276 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
277 MachineBasicBlock *JTBB);
279 unsigned getUserOffset(CPUser&) const;
280 void dumpBBs();
281 void verify();
283 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
284 unsigned Disp, bool NegativeOK, bool IsSoImm = false);
285 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
286 const CPUser &U) {
287 return isOffsetInRange(UserOffset, TrialOffset,
288 U.getMaxDisp(), U.NegOk, U.IsSoImm);
292 } // end anonymous namespace
294 char ARMConstantIslands::ID = 0;
296 /// verify - check BBOffsets, BBSizes, alignment of islands
297 void ARMConstantIslands::verify() {
298 #ifndef NDEBUG
299 BBInfoVector &BBInfo = BBUtils->getBBInfo();
300 assert(is_sorted(*MF, [&BBInfo](const MachineBasicBlock &LHS,
301 const MachineBasicBlock &RHS) {
302 return BBInfo[LHS.getNumber()].postOffset() <
303 BBInfo[RHS.getNumber()].postOffset();
304 }));
305 LLVM_DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
306 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
307 CPUser &U = CPUsers[i];
308 unsigned UserOffset = getUserOffset(U);
309 // Verify offset using the real max displacement without the safety
310 // adjustment.
311 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
312 /* DoDump = */ true)) {
313 LLVM_DEBUG(dbgs() << "OK\n");
314 continue;
316 LLVM_DEBUG(dbgs() << "Out of range.\n");
317 dumpBBs();
318 LLVM_DEBUG(MF->dump());
319 llvm_unreachable("Constant pool entry out of range!");
321 #endif
324 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
325 /// print block size and offset information - debugging
326 LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
327 LLVM_DEBUG({
328 BBInfoVector &BBInfo = BBUtils->getBBInfo();
329 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
330 const BasicBlockInfo &BBI = BBInfo[J];
331 dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
332 << " kb=" << unsigned(BBI.KnownBits)
333 << " ua=" << unsigned(BBI.Unalign) << " pa=" << Log2(BBI.PostAlign)
334 << format(" size=%#x\n", BBInfo[J].Size);
338 #endif
340 // Align blocks where the previous block does not fall through. This may add
341 // extra NOPs, but they will not be executed. It uses the PrefLoopAlignment as a
342 // measure of how much to align, and only runs at CodeGenOpt::Aggressive.
343 static bool AlignBlocks(MachineFunction *MF) {
344 if (MF->getTarget().getOptLevel() != CodeGenOpt::Aggressive ||
345 MF->getFunction().hasOptSize())
346 return false;
348 auto *TLI = MF->getSubtarget().getTargetLowering();
349 const Align Alignment = TLI->getPrefLoopAlignment();
350 if (Alignment < 4)
351 return false;
353 bool Changed = false;
354 bool PrevCanFallthough = true;
355 for (auto &MBB : *MF) {
356 if (!PrevCanFallthough) {
357 Changed = true;
358 MBB.setAlignment(Alignment);
360 PrevCanFallthough = MBB.canFallThrough();
363 return Changed;
366 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
367 MF = &mf;
368 MCP = mf.getConstantPool();
369 BBUtils = std::unique_ptr<ARMBasicBlockUtils>(new ARMBasicBlockUtils(mf));
371 LLVM_DEBUG(dbgs() << "***** ARMConstantIslands: "
372 << MCP->getConstants().size() << " CP entries, aligned to "
373 << MCP->getConstantPoolAlign().value() << " bytes *****\n");
375 STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
376 TII = STI->getInstrInfo();
377 isPositionIndependentOrROPI =
378 STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
379 AFI = MF->getInfo<ARMFunctionInfo>();
380 DT = &getAnalysis<MachineDominatorTree>();
382 isThumb = AFI->isThumbFunction();
383 isThumb1 = AFI->isThumb1OnlyFunction();
384 isThumb2 = AFI->isThumb2Function();
386 bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
387 // TBB generation code in this constant island pass has not been adapted to
388 // deal with speculation barriers.
389 if (STI->hardenSlsRetBr())
390 GenerateTBB = false;
392 // Renumber all of the machine basic blocks in the function, guaranteeing that
393 // the numbers agree with the position of the block in the function.
394 MF->RenumberBlocks();
396 // Try to reorder and otherwise adjust the block layout to make good use
397 // of the TB[BH] instructions.
398 bool MadeChange = false;
399 if (GenerateTBB && AdjustJumpTableBlocks) {
400 scanFunctionJumpTables();
401 MadeChange |= reorderThumb2JumpTables();
402 // Data is out of date, so clear it. It'll be re-computed later.
403 T2JumpTables.clear();
404 // Blocks may have shifted around. Keep the numbering up to date.
405 MF->RenumberBlocks();
408 // Align any non-fallthrough blocks
409 MadeChange |= AlignBlocks(MF);
411 // Perform the initial placement of the constant pool entries. To start with,
412 // we put them all at the end of the function.
413 std::vector<MachineInstr*> CPEMIs;
414 if (!MCP->isEmpty())
415 doInitialConstPlacement(CPEMIs);
417 if (MF->getJumpTableInfo())
418 doInitialJumpTablePlacement(CPEMIs);
420 /// The next UID to take is the first unused one.
421 AFI->initPICLabelUId(CPEMIs.size());
423 // Do the initial scan of the function, building up information about the
424 // sizes of each block, the location of all the water, and finding all of the
425 // constant pool users.
426 initializeFunctionInfo(CPEMIs);
427 CPEMIs.clear();
428 LLVM_DEBUG(dumpBBs());
430 // Functions with jump tables need an alignment of 4 because they use the ADR
431 // instruction, which aligns the PC to 4 bytes before adding an offset.
432 if (!T2JumpTables.empty())
433 MF->ensureAlignment(Align(4));
435 /// Remove dead constant pool entries.
436 MadeChange |= removeUnusedCPEntries();
438 // Iteratively place constant pool entries and fix up branches until there
439 // is no change.
440 unsigned NoCPIters = 0, NoBRIters = 0;
441 while (true) {
442 LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
443 bool CPChange = false;
444 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
445 // For most inputs, it converges in no more than 5 iterations.
446 // If it hasn't converged after 10, the input may have a huge BB or many CPEs.
447 // In that case, we try different heuristics.
448 CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
449 if (CPChange && ++NoCPIters > CPMaxIteration)
450 report_fatal_error("Constant Island pass failed to converge!");
451 LLVM_DEBUG(dumpBBs());
453 // Clear NewWaterList now. If we split a block for branches, it should
454 // appear as "new water" for the next iteration of constant pool placement.
455 NewWaterList.clear();
457 LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
458 bool BRChange = false;
459 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
460 BRChange |= fixupImmediateBr(ImmBranches[i]);
461 if (BRChange && ++NoBRIters > 30)
462 report_fatal_error("Branch Fix Up pass failed to converge!");
463 LLVM_DEBUG(dumpBBs());
465 if (!CPChange && !BRChange)
466 break;
467 MadeChange = true;
470 // Shrink 32-bit Thumb2 load and store instructions.
471 if (isThumb2 && !STI->prefers32BitThumb())
472 MadeChange |= optimizeThumb2Instructions();
474 // Shrink 32-bit branch instructions.
475 if (isThumb && STI->hasV8MBaselineOps())
476 MadeChange |= optimizeThumb2Branches();
478 // Optimize jump tables using TBB / TBH.
479 if (GenerateTBB && !STI->genExecuteOnly())
480 MadeChange |= optimizeThumb2JumpTables();
482 // After a while, this might be made debug-only, but it is not expensive.
483 verify();
485 // Save the mapping between original and cloned constpool entries.
486 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
487 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
488 const CPEntry & CPE = CPEntries[i][j];
489 if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
490 AFI->recordCPEClone(i, CPE.CPI);
494 LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
496 BBUtils->clear();
497 WaterList.clear();
498 CPUsers.clear();
499 CPEntries.clear();
500 JumpTableEntryIndices.clear();
501 JumpTableUserIndices.clear();
502 ImmBranches.clear();
503 PushPopMIs.clear();
504 T2JumpTables.clear();
506 return MadeChange;
509 /// Perform the initial placement of the regular constant pool entries.
510 /// To start with, we put them all at the end of the function.
511 void
512 ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
513 // Create the basic block to hold the CPE's.
514 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
515 MF->push_back(BB);
517 // MachineConstantPool measures alignment in bytes.
518 const Align MaxAlign = MCP->getConstantPoolAlign();
519 const unsigned MaxLogAlign = Log2(MaxAlign);
521 // Mark the basic block as required by the const-pool.
522 BB->setAlignment(MaxAlign);
524 // The function needs to be as aligned as the basic blocks. The linker may
525 // move functions around based on their alignment.
526 // Special case: halfword literals still need word alignment on the function.
527 Align FuncAlign = MaxAlign;
528 if (MaxAlign == 2)
529 FuncAlign = Align(4);
530 MF->ensureAlignment(FuncAlign);
532 // Order the entries in BB by descending alignment. That ensures correct
533 // alignment of all entries as long as BB is sufficiently aligned. Keep
534 // track of the insertion point for each alignment. We are going to bucket
535 // sort the entries as they are created.
536 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
537 BB->end());
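// For example, with MaxAlign = 8 there are buckets for 1-, 2-, 4- and 8-byte
// alignment (InsPoint[0..3]), all starting at BB->end(). Inserting a 4-byte
// aligned entry at InsPoint[2] also moves any higher-alignment bucket still
// pointing at that spot to just before the new entry, so more strictly
// aligned entries always end up earlier in the block.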
539 // Add all of the constants from the constant pool to the end block, use an
540 // identity mapping of CPI's to CPE's.
541 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
543 const DataLayout &TD = MF->getDataLayout();
544 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
545 unsigned Size = CPs[i].getSizeInBytes(TD);
546 Align Alignment = CPs[i].getAlign();
547 // Verify that all constant pool entries are a multiple of their alignment.
548 // If not, we would have to pad them out so that instructions stay aligned.
549 assert(isAligned(Alignment, Size) && "CP Entry not multiple of 4 bytes!");
551 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
552 unsigned LogAlign = Log2(Alignment);
553 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
554 MachineInstr *CPEMI =
555 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
556 .addImm(i).addConstantPoolIndex(i).addImm(Size);
557 CPEMIs.push_back(CPEMI);
559 // Ensure that future entries with higher alignment get inserted before
560 // CPEMI. This is bucket sort with iterators.
561 for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
562 if (InsPoint[a] == InsAt)
563 InsPoint[a] = CPEMI;
565 // Add a new CPEntry, but no corresponding CPUser yet.
566 CPEntries.emplace_back(1, CPEntry(CPEMI, i));
567 ++NumCPEs;
568 LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
569 << Size << ", align = " << Alignment.value() << '\n');
571 LLVM_DEBUG(BB->dump());
574 /// Do initial placement of the jump tables. Because Thumb2's TBB and TBH
575 /// instructions can be made more efficient if the jump table immediately
576 /// follows the instruction, it's best to place them immediately next to their
577 /// jumps to begin with. In almost all cases they'll never be moved from that
578 /// position.
579 void ARMConstantIslands::doInitialJumpTablePlacement(
580 std::vector<MachineInstr *> &CPEMIs) {
581 unsigned i = CPEntries.size();
582 auto MJTI = MF->getJumpTableInfo();
583 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
585 MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
586 for (MachineBasicBlock &MBB : *MF) {
587 auto MI = MBB.getLastNonDebugInstr();
588 // Look past potential SpeculationBarriers at end of BB.
589 while (MI != MBB.end() &&
590 (isSpeculationBarrierEndBBOpcode(MI->getOpcode()) ||
591 MI->isDebugInstr()))
592 --MI;
594 if (MI == MBB.end())
595 continue;
597 unsigned JTOpcode;
598 switch (MI->getOpcode()) {
599 default:
600 continue;
601 case ARM::BR_JTadd:
602 case ARM::BR_JTr:
603 case ARM::tBR_JTr:
604 case ARM::BR_JTm_i12:
605 case ARM::BR_JTm_rs:
606 JTOpcode = ARM::JUMPTABLE_ADDRS;
607 break;
608 case ARM::t2BR_JT:
609 JTOpcode = ARM::JUMPTABLE_INSTS;
610 break;
611 case ARM::tTBB_JT:
612 case ARM::t2TBB_JT:
613 JTOpcode = ARM::JUMPTABLE_TBB;
614 break;
615 case ARM::tTBH_JT:
616 case ARM::t2TBH_JT:
617 JTOpcode = ARM::JUMPTABLE_TBH;
618 break;
621 unsigned NumOps = MI->getDesc().getNumOperands();
622 MachineOperand JTOp =
623 MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
624 unsigned JTI = JTOp.getIndex();
625 unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
626 MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
627 MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
628 MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
629 DebugLoc(), TII->get(JTOpcode))
630 .addImm(i++)
631 .addJumpTableIndex(JTI)
632 .addImm(Size);
633 CPEMIs.push_back(CPEMI);
634 CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
635 JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
636 if (!LastCorrectlyNumberedBB)
637 LastCorrectlyNumberedBB = &MBB;
640 // If we did anything then we need to renumber the subsequent blocks.
641 if (LastCorrectlyNumberedBB)
642 MF->RenumberBlocks(LastCorrectlyNumberedBB);
645 /// BBHasFallthrough - Return true if the specified basic block can fallthrough
646 /// into the block immediately after it.
647 bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
648 // Get the next machine basic block in the function.
649 MachineFunction::iterator MBBI = MBB->getIterator();
650 // Can't fall off end of function.
651 if (std::next(MBBI) == MBB->getParent()->end())
652 return false;
654 MachineBasicBlock *NextBB = &*std::next(MBBI);
655 if (!MBB->isSuccessor(NextBB))
656 return false;
658 // Try to analyze the end of the block. A potential fallthrough may already
659 // have an unconditional branch for whatever reason.
660 MachineBasicBlock *TBB, *FBB;
661 SmallVector<MachineOperand, 4> Cond;
662 bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
663 return TooDifficult || FBB == nullptr;
666 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
667 /// look up the corresponding CPEntry.
668 ARMConstantIslands::CPEntry *
669 ARMConstantIslands::findConstPoolEntry(unsigned CPI,
670 const MachineInstr *CPEMI) {
671 std::vector<CPEntry> &CPEs = CPEntries[CPI];
672 // Number of entries per constpool index should be small, just do a
673 // linear search.
674 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
675 if (CPEs[i].CPEMI == CPEMI)
676 return &CPEs[i];
678 return nullptr;
681 /// getCPEAlign - Returns the required alignment of the constant pool entry
682 /// represented by CPEMI.
683 Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
684 switch (CPEMI->getOpcode()) {
685 case ARM::CONSTPOOL_ENTRY:
686 break;
687 case ARM::JUMPTABLE_TBB:
688 return isThumb1 ? Align(4) : Align(1);
689 case ARM::JUMPTABLE_TBH:
690 return isThumb1 ? Align(4) : Align(2);
691 case ARM::JUMPTABLE_INSTS:
692 return Align(2);
693 case ARM::JUMPTABLE_ADDRS:
694 return Align(4);
695 default:
696 llvm_unreachable("unknown constpool entry kind");
699 unsigned CPI = getCombinedIndex(CPEMI);
700 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
701 return MCP->getConstants()[CPI].getAlign();
704 /// scanFunctionJumpTables - Do a scan of the function, building up
705 /// information about the sizes of each block and the locations of all
706 /// the jump tables.
707 void ARMConstantIslands::scanFunctionJumpTables() {
708 for (MachineBasicBlock &MBB : *MF) {
709 for (MachineInstr &I : MBB)
710 if (I.isBranch() &&
711 (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
712 T2JumpTables.push_back(&I);
716 /// initializeFunctionInfo - Do the initial scan of the function, building up
717 /// information about the sizes of each block, the location of all the water,
718 /// and finding all of the constant pool users.
719 void ARMConstantIslands::
720 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
722 BBUtils->computeAllBlockSizes();
723 BBInfoVector &BBInfo = BBUtils->getBBInfo();
724 // The known bits of the entry block offset are determined by the function
725 // alignment.
726 BBInfo.front().KnownBits = Log2(MF->getAlignment());
728 // Compute block offsets and known bits.
729 BBUtils->adjustBBOffsetsAfter(&MF->front());
731 // Now go back through the instructions and build up our data structures.
732 for (MachineBasicBlock &MBB : *MF) {
733 // If this block doesn't fall through into the next MBB, then this is
734 // 'water' where a constant pool island could be placed.
735 if (!BBHasFallthrough(&MBB))
736 WaterList.push_back(&MBB);
738 for (MachineInstr &I : MBB) {
739 if (I.isDebugInstr())
740 continue;
742 unsigned Opc = I.getOpcode();
743 if (I.isBranch()) {
744 bool isCond = false;
745 unsigned Bits = 0;
746 unsigned Scale = 1;
747 int UOpc = Opc;
748 switch (Opc) {
749 default:
750 continue; // Ignore other JT branches
751 case ARM::t2BR_JT:
752 case ARM::tBR_JTr:
753 T2JumpTables.push_back(&I);
754 continue; // Does not get an entry in ImmBranches
755 case ARM::Bcc:
756 isCond = true;
757 UOpc = ARM::B;
758 LLVM_FALLTHROUGH;
759 case ARM::B:
760 Bits = 24;
761 Scale = 4;
762 break;
763 case ARM::tBcc:
764 isCond = true;
765 UOpc = ARM::tB;
766 Bits = 8;
767 Scale = 2;
768 break;
769 case ARM::tB:
770 Bits = 11;
771 Scale = 2;
772 break;
773 case ARM::t2Bcc:
774 isCond = true;
775 UOpc = ARM::t2B;
776 Bits = 20;
777 Scale = 2;
778 break;
779 case ARM::t2B:
780 Bits = 24;
781 Scale = 2;
782 break;
785 // Record this immediate branch.
786 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
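// For example, ARM::B has Bits = 24 and Scale = 4, giving a branch range of
// roughly +/-32 MiB, while Thumb1 tB (Bits = 11, Scale = 2) only reaches
// +/-2046 bytes and t2B (Bits = 24, Scale = 2) about +/-16 MiB.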
787 ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
790 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
791 PushPopMIs.push_back(&I);
793 if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
794 Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
795 Opc == ARM::JUMPTABLE_TBH)
796 continue;
798 // Scan the instructions for constant pool operands.
799 for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
800 if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
801 // We found one. The addressing mode tells us the max displacement
802 // from the PC that this instruction permits.
804 // Basic size info comes from the TSFlags field.
805 unsigned Bits = 0;
806 unsigned Scale = 1;
807 bool NegOk = false;
808 bool IsSoImm = false;
810 switch (Opc) {
811 default:
812 llvm_unreachable("Unknown addressing mode for CP reference!");
814 // Taking the address of a CP entry.
815 case ARM::LEApcrel:
816 case ARM::LEApcrelJT: {
817 // This takes a SoImm, which is an 8-bit immediate rotated. We'll
818 // pretend the maximum offset is 255 * 4. Since each instruction is
819 // 4 bytes wide, this is always correct. We'll check for other
820 // displacements that fit in a SoImm as well.
821 Bits = 8;
822 NegOk = true;
823 IsSoImm = true;
824 unsigned CPI = I.getOperand(op).getIndex();
825 assert(CPI < CPEMIs.size());
826 MachineInstr *CPEMI = CPEMIs[CPI];
827 const Align CPEAlign = getCPEAlign(CPEMI);
828 const unsigned LogCPEAlign = Log2(CPEAlign);
829 if (LogCPEAlign >= 2)
830 Scale = 4;
831 else
832 // For constants with less than 4-byte alignment,
833 // we'll pretend the maximum offset is 255 * 1.
834 Scale = 1;
836 break;
837 case ARM::t2LEApcrel:
838 case ARM::t2LEApcrelJT:
839 Bits = 12;
840 NegOk = true;
841 break;
842 case ARM::tLEApcrel:
843 case ARM::tLEApcrelJT:
844 Bits = 8;
845 Scale = 4;
846 break;
848 case ARM::LDRBi12:
849 case ARM::LDRi12:
850 case ARM::LDRcp:
851 case ARM::t2LDRpci:
852 case ARM::t2LDRHpci:
853 case ARM::t2LDRSHpci:
854 case ARM::t2LDRBpci:
855 case ARM::t2LDRSBpci:
856 Bits = 12; // +-offset_12
857 NegOk = true;
858 break;
860 case ARM::tLDRpci:
861 Bits = 8;
862 Scale = 4; // +(offset_8*4)
863 break;
865 case ARM::VLDRD:
866 case ARM::VLDRS:
867 Bits = 8;
868 Scale = 4; // +-(offset_8*4)
869 NegOk = true;
870 break;
871 case ARM::VLDRH:
872 Bits = 8;
873 Scale = 2; // +-(offset_8*2)
874 NegOk = true;
875 break;
878 // Remember that this is a user of a CP entry.
879 unsigned CPI = I.getOperand(op).getIndex();
880 if (I.getOperand(op).isJTI()) {
881 JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
882 CPI = JumpTableEntryIndices[CPI];
885 MachineInstr *CPEMI = CPEMIs[CPI];
886 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
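// For example, tLDRpci (Bits = 8, Scale = 4, NegOk = false) reaches 1020
// bytes forward only, t2LDRpci (Bits = 12, NegOk = true) +/-4095 bytes, and
// VLDRD/VLDRS (Bits = 8, Scale = 4, NegOk = true) +/-1020 bytes.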
887 CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
889 // Increment corresponding CPEntry reference count.
890 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
891 assert(CPE && "Cannot find a corresponding CPEntry!");
892 CPE->RefCount++;
894 // Instructions can only use one CP entry, so don't bother scanning the
895 // rest of the operands.
896 break;
902 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
903 /// ID.
904 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
905 const MachineBasicBlock *RHS) {
906 return LHS->getNumber() < RHS->getNumber();
909 /// updateForInsertedWaterBlock - When a block is newly inserted into the
910 /// machine function, it upsets all of the block numbers. Renumber the blocks
911 /// and update the arrays that parallel this numbering.
912 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
913 // Renumber the MBB's to keep them consecutive.
914 NewBB->getParent()->RenumberBlocks(NewBB);
916 // Insert an entry into BBInfo to align it properly with the (newly
917 // renumbered) block numbers.
918 BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
920 // Next, update WaterList. Specifically, we need to add NewMBB as having
921 // available water after it.
922 water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
923 WaterList.insert(IP, NewBB);
926 /// Split the basic block containing MI into two blocks, which are joined by
927 /// an unconditional branch. Update data structures and renumber blocks to
928 /// account for this change and returns the newly created block.
929 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
930 MachineBasicBlock *OrigBB = MI->getParent();
932 // Collect liveness information at MI.
933 LivePhysRegs LRs(*MF->getSubtarget().getRegisterInfo());
934 LRs.addLiveOuts(*OrigBB);
935 auto LivenessEnd = ++MachineBasicBlock::iterator(MI).getReverse();
936 for (MachineInstr &LiveMI : make_range(OrigBB->rbegin(), LivenessEnd))
937 LRs.stepBackward(LiveMI);
939 // Create a new MBB for the code after the OrigBB.
940 MachineBasicBlock *NewBB =
941 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
942 MachineFunction::iterator MBBI = ++OrigBB->getIterator();
943 MF->insert(MBBI, NewBB);
945 // Splice the instructions starting with MI over to NewBB.
946 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
948 // Add an unconditional branch from OrigBB to NewBB.
949 // Note the new unconditional branch is not being recorded.
950 // There doesn't seem to be meaningful DebugInfo available; this doesn't
951 // correspond to anything in the source.
952 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
953 if (!isThumb)
954 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
955 else
956 BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
957 .addMBB(NewBB)
958 .add(predOps(ARMCC::AL));
959 ++NumSplit;
961 // Update the CFG. All succs of OrigBB are now succs of NewBB.
962 NewBB->transferSuccessors(OrigBB);
964 // OrigBB branches to NewBB.
965 OrigBB->addSuccessor(NewBB);
967 // Update live-in information in the new block.
968 MachineRegisterInfo &MRI = MF->getRegInfo();
969 for (MCPhysReg L : LRs)
970 if (!MRI.isReserved(L))
971 NewBB->addLiveIn(L);
973 // Update internal data structures to account for the newly inserted MBB.
974 // This is almost the same as updateForInsertedWaterBlock, except that
975 // the Water goes after OrigBB, not NewBB.
976 MF->RenumberBlocks(NewBB);
978 // Insert an entry into BBInfo to align it properly with the (newly
979 // renumbered) block numbers.
980 BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
982 // Next, update WaterList. Specifically, we need to add OrigMBB as having
983 // available water after it (but not if it's already there, which happens
984 // when splitting before a conditional branch that is followed by an
985 // unconditional branch - in that case we want to insert NewBB).
986 water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
987 MachineBasicBlock* WaterBB = *IP;
988 if (WaterBB == OrigBB)
989 WaterList.insert(std::next(IP), NewBB);
990 else
991 WaterList.insert(IP, OrigBB);
992 NewWaterList.insert(OrigBB);
994 // Figure out how large the OrigBB is. As the first half of the original
995 // block, it cannot contain a tablejump. The size includes
996 // the new jump we added. (It should be possible to do this without
997 // recounting everything, but it's very confusing, and this is rarely
998 // executed.)
999 BBUtils->computeBlockSize(OrigBB);
1001 // Figure out how large the NewMBB is. As the second half of the original
1002 // block, it may contain a tablejump.
1003 BBUtils->computeBlockSize(NewBB);
1005 // All BBOffsets following these blocks must be modified.
1006 BBUtils->adjustBBOffsetsAfter(OrigBB);
1008 return NewBB;
1011 /// getUserOffset - Compute the offset of U.MI as seen by the hardware
1012 /// displacement computation. Update U.KnownAlignment to match its current
1013 /// basic block location.
1014 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
1015 unsigned UserOffset = BBUtils->getOffsetOf(U.MI);
1017 SmallVectorImpl<BasicBlockInfo> &BBInfo = BBUtils->getBBInfo();
1018 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
1019 unsigned KnownBits = BBI.internalKnownBits();
1021 // The value read from PC is offset from the actual instruction address.
1022 UserOffset += (isThumb ? 4 : 8);
1024 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
1025 // Make sure U.getMaxDisp() returns a constrained range.
1026 U.KnownAlignment = (KnownBits >= 2);
1028 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
1029 // purposes of the displacement computation; compensate for that here.
1030 // For unknown alignments, getMaxDisp() constrains the range instead.
1031 if (isThumb && U.KnownAlignment)
1032 UserOffset &= ~3u;
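// For example, a Thumb user at offset 0x102 computes 0x102 + 4 = 0x106 above,
// and the mask rounds that down to 0x104, matching what the hardware uses as
// the base for the displacement.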
1034 return UserOffset;
1037 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
1038 /// reference) is within MaxDisp of TrialOffset (a proposed location of a
1039 /// constant pool entry).
1040 /// UserOffset is computed by getUserOffset above to include PC adjustments. If
1041 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be
1042 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
1043 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
1044 unsigned TrialOffset, unsigned MaxDisp,
1045 bool NegativeOK, bool IsSoImm) {
1046 if (UserOffset <= TrialOffset) {
1047 // User before the Trial.
1048 if (TrialOffset - UserOffset <= MaxDisp)
1049 return true;
1050 // FIXME: Make use full range of soimm values.
1051 } else if (NegativeOK) {
1052 if (UserOffset - TrialOffset <= MaxDisp)
1053 return true;
1054 // FIXME: Make use full range of soimm values.
1056 return false;
1059 /// isWaterInRange - Returns true if a CPE placed after the specified
1060 /// Water (a basic block) will be in range for the specific MI.
1062 /// Compute how much the function will grow by inserting a CPE after Water.
1063 bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
1064 MachineBasicBlock* Water, CPUser &U,
1065 unsigned &Growth) {
1066 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1067 const Align CPEAlign = getCPEAlign(U.CPEMI);
1068 const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
1069 unsigned NextBlockOffset;
1070 Align NextBlockAlignment;
1071 MachineFunction::const_iterator NextBlock = Water->getIterator();
1072 if (++NextBlock == MF->end()) {
1073 NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1074 } else {
1075 NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1076 NextBlockAlignment = NextBlock->getAlignment();
1078 unsigned Size = U.CPEMI->getOperand(2).getImm();
1079 unsigned CPEEnd = CPEOffset + Size;
1081 // The CPE may be able to hide in the alignment padding before the next
1082 // block. It may also cause more padding to be required if it is more aligned
1083 // than the next block.
1084 if (CPEEnd > NextBlockOffset) {
1085 Growth = CPEEnd - NextBlockOffset;
1086 // Compute the padding that would go at the end of the CPE to align the next
1087 // block.
1088 Growth += offsetToAlignment(CPEEnd, NextBlockAlignment);
1090 // If the CPE is to be inserted before the instruction, that will raise
1091 // the offset of the instruction. Also account for unknown alignment padding
1092 // in blocks between CPE and the user.
1093 if (CPEOffset < UserOffset)
1094 UserOffset += Growth + UnknownPadding(MF->getAlignment(), Log2(CPEAlign));
1095 } else
1096 // CPE fits in existing padding.
1097 Growth = 0;
1099 return isOffsetInRange(UserOffset, CPEOffset, U);
1102 /// isCPEntryInRange - Returns true if the distance between specific MI and
1103 /// specific ConstPool entry instruction can fit in MI's displacement field.
1104 bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1105 MachineInstr *CPEMI, unsigned MaxDisp,
1106 bool NegOk, bool DoDump) {
1107 unsigned CPEOffset = BBUtils->getOffsetOf(CPEMI);
1109 if (DoDump) {
1110 LLVM_DEBUG({
1111 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1112 unsigned Block = MI->getParent()->getNumber();
1113 const BasicBlockInfo &BBI = BBInfo[Block];
1114 dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1115 << " max delta=" << MaxDisp
1116 << format(" insn address=%#x", UserOffset) << " in "
1117 << printMBBReference(*MI->getParent()) << ": "
1118 << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1119 << format("CPE address=%#x offset=%+d: ", CPEOffset,
1120 int(CPEOffset - UserOffset));
1124 return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1127 #ifndef NDEBUG
1128 /// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1129 /// unconditionally branches to its only successor.
1130 static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1131 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1132 return false;
1134 MachineBasicBlock *Succ = *MBB->succ_begin();
1135 MachineBasicBlock *Pred = *MBB->pred_begin();
1136 MachineInstr *PredMI = &Pred->back();
1137 if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1138 || PredMI->getOpcode() == ARM::t2B)
1139 return PredMI->getOperand(0).getMBB() == Succ;
1140 return false;
1142 #endif // NDEBUG
1144 /// decrementCPEReferenceCount - find the constant pool entry with index CPI
1145 /// and instruction CPEMI, and decrement its refcount. If the refcount
1146 /// becomes 0 remove the entry and instruction. Returns true if we removed
1147 /// the entry, false if we didn't.
1148 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1149 MachineInstr *CPEMI) {
1150 // Find the old entry. Eliminate it if it is no longer used.
1151 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1152 assert(CPE && "Unexpected!");
1153 if (--CPE->RefCount == 0) {
1154 removeDeadCPEMI(CPEMI);
1155 CPE->CPEMI = nullptr;
1156 --NumCPEs;
1157 return true;
1159 return false;
1162 unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
1163 if (CPEMI->getOperand(1).isCPI())
1164 return CPEMI->getOperand(1).getIndex();
1166 return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
1169 /// LookForCPEntryInRange - see if the currently referenced CPE is in range;
1170 /// if not, see if an in-range clone of the CPE is in range, and if so,
1171 /// change the data structures so the user references the clone. Returns:
1172 /// 0 = no existing entry found
1173 /// 1 = entry found, and there were no code insertions or deletions
1174 /// 2 = entry found, and there were code insertions or deletions
1175 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
1176 MachineInstr *UserMI = U.MI;
1177 MachineInstr *CPEMI = U.CPEMI;
1179 // Check to see if the CPE is already in-range.
1180 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1181 true)) {
1182 LLVM_DEBUG(dbgs() << "In range\n");
1183 return 1;
1186 // No. Look for previously created clones of the CPE that are in range.
1187 unsigned CPI = getCombinedIndex(CPEMI);
1188 std::vector<CPEntry> &CPEs = CPEntries[CPI];
1189 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
1190 // We already tried this one
1191 if (CPEs[i].CPEMI == CPEMI)
1192 continue;
1193 // Removing CPEs can leave empty entries, skip
1194 if (CPEs[i].CPEMI == nullptr)
1195 continue;
1196 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
1197 U.NegOk)) {
1198 LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
1199 << CPEs[i].CPI << "\n");
1200 // Point the CPUser node to the replacement
1201 U.CPEMI = CPEs[i].CPEMI;
1202 // Change the CPI in the instruction operand to refer to the clone.
1203 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
1204 if (UserMI->getOperand(j).isCPI()) {
1205 UserMI->getOperand(j).setIndex(CPEs[i].CPI);
1206 break;
1208 // Adjust the refcount of the clone...
1209 CPEs[i].RefCount++;
1210 // ...and the original. If we didn't remove the old entry, none of the
1211 // addresses changed, so we don't need another pass.
1212 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1215 return 0;
1218 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1219 /// the specific unconditional branch instruction.
1220 static inline unsigned getUnconditionalBrDisp(int Opc) {
1221 switch (Opc) {
1222 case ARM::tB:
1223 return ((1<<10)-1)*2;
1224 case ARM::t2B:
1225 return ((1<<23)-1)*2;
1226 default:
1227 break;
1230 return ((1<<23)-1)*4;
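// Worked out, this gives 2046 bytes for tB, about 16 MiB for t2B, and about
// 32 MiB for the ARM B default above.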
1233 /// findAvailableWater - Look for an existing entry in the WaterList in which
1234 /// we can place the CPE referenced from U so it's within range of U's MI.
1235 /// Returns true if found, false if not. If it returns true, WaterIter
1236 /// is set to the WaterList entry. For Thumb, prefer water that will not
1237 /// introduce padding to water that will. To ensure that this pass
1238 /// terminates, the CPE location for a particular CPUser is only allowed to
1239 /// move to a lower address, so search backward from the end of the list and
1240 /// prefer the first water that is in range.
1241 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1242 water_iterator &WaterIter,
1243 bool CloserWater) {
1244 if (WaterList.empty())
1245 return false;
1247 unsigned BestGrowth = ~0u;
1248 // The nearest water without splitting the UserBB is right after it.
1249 // If the distance is still large (we have a big BB), then we need to split it
1250 // if we don't converge after a certain number of iterations. This helps the following
1251 // situation to converge:
1252 // BB0:
1253 // Big BB
1254 // BB1:
1255 // Constant Pool
1256 // When a CP access is out of range, BB0 may be used as water. However,
1257 // inserting islands between BB0 and BB1 makes other accesses out of range.
1258 MachineBasicBlock *UserBB = U.MI->getParent();
1259 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1260 const Align CPEAlign = getCPEAlign(U.CPEMI);
1261 unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
1262 if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
1263 return false;
1264 for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
1265 --IP) {
1266 MachineBasicBlock* WaterBB = *IP;
1267 // Check if water is in range and is either at a lower address than the
1268 // current "high water mark" or a new water block that was created since
1269 // the previous iteration by inserting an unconditional branch. In the
1270 // latter case, we want to allow resetting the high water mark back to
1271 // this new water since we haven't seen it before. Inserting branches
1272 // should be relatively uncommon and when it does happen, we want to be
1273 // sure to take advantage of it for all the CPEs near that block, so that
1274 // we don't insert more branches than necessary.
1275 // When CloserWater is true, we try to find the lowest address after (or
1276 // equal to) user MI's BB regardless of padding growth.
1277 unsigned Growth;
1278 if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1279 (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1280 NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
1281 Growth < BestGrowth) {
1282 // This is the least amount of required padding seen so far.
1283 BestGrowth = Growth;
1284 WaterIter = IP;
1285 LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
1286 << " Growth=" << Growth << '\n');
1288 if (CloserWater && WaterBB == U.MI->getParent())
1289 return true;
1290 // Keep looking unless it is perfect and we're not looking for the lowest
1291 // possible address.
1292 if (!CloserWater && BestGrowth == 0)
1293 return true;
1295 if (IP == B)
1296 break;
1298 return BestGrowth != ~0u;
1301 /// createNewWater - No existing WaterList entry will work for
1302 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1303 /// block is used if in range, and the conditional branch munged so control
1304 /// flow is correct. Otherwise the block is split to create a hole with an
1305 /// unconditional branch around it. In either case NewMBB is set to a
1306 /// block following which the new island can be inserted (the WaterList
1307 /// is not adjusted).
1308 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1309 unsigned UserOffset,
1310 MachineBasicBlock *&NewMBB) {
1311 CPUser &U = CPUsers[CPUserIndex];
1312 MachineInstr *UserMI = U.MI;
1313 MachineInstr *CPEMI = U.CPEMI;
1314 const Align CPEAlign = getCPEAlign(CPEMI);
1315 MachineBasicBlock *UserMBB = UserMI->getParent();
1316 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1317 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1319 // If the block does not end in an unconditional branch already, and if the
1320 // end of the block is within range, make new water there. (The addition
1321 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1322 // Thumb2, 2 on Thumb1.)
1323 if (BBHasFallthrough(UserMBB)) {
1324 // Size of branch to insert.
1325 unsigned Delta = isThumb1 ? 2 : 4;
1326 // Compute the offset where the CPE will begin.
1327 unsigned CPEOffset = UserBBI.postOffset(CPEAlign) + Delta;
1329 if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1330 LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
1331 << format(", expected CPE offset %#x\n", CPEOffset));
1332 NewMBB = &*++UserMBB->getIterator();
1333 // Add an unconditional branch from UserMBB to fallthrough block. Record
1334 // it for branch lengthening; this new branch will not get out of range,
1335 // but if the preceding conditional branch is out of range, the targets
1336 // will be exchanged, and the altered branch may be out of range, so the
1337 // machinery has to know about it.
1338 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1339 if (!isThumb)
1340 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1341 else
1342 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
1343 .addMBB(NewMBB)
1344 .add(predOps(ARMCC::AL));
1345 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1346 ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1347 MaxDisp, false, UncondBr));
1348 BBUtils->computeBlockSize(UserMBB);
1349 BBUtils->adjustBBOffsetsAfter(UserMBB);
1350 return;
1354 // What a big block. Find a place within the block to split it. This is a
1355 // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1356 // entries are 4 bytes: if instruction I references island CPE, and
1357 // instruction I+1 references CPE', it will not work well to put CPE as far
1358 // forward as possible, since then CPE' cannot immediately follow it (that
1359 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1360 // need to create a new island. So, we make a first guess, then walk through
1361 // the instructions between the one currently being looked at and the
1362 // possible insertion point, and make sure any other instructions that
1363 // reference CPEs will be able to use the same island area; if not, we back
1364 // up the insertion point.
1366 // Try to split the block so it's fully aligned. Compute the latest split
1367 // point where we can add a 4-byte branch instruction, and then align to
1368 // Align which is the largest possible alignment in the function.
1369 const Align Align = MF->getAlignment();
1370 assert(Align >= CPEAlign && "Over-aligned constant pool entry");
1371 unsigned KnownBits = UserBBI.internalKnownBits();
1372 unsigned UPad = UnknownPadding(Align, KnownBits);
1373 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
1374 LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
1375 BaseInsertOffset));
1377 // The 4 in the following is for the unconditional branch we'll be inserting
1378 // (allows for long branch on Thumb1). Alignment of the island is handled
1379 // inside isOffsetInRange.
1380 BaseInsertOffset -= 4;
1382 LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1383 << " la=" << Log2(Align) << " kb=" << KnownBits
1384 << " up=" << UPad << '\n');
1386 // This could point off the end of the block if we've already got constant
1387 // pool entries following this block; only the last one is in the water list.
1388 // Back past any possible branches (allow for a conditional and a maximally
1389 // long unconditional).
1390 if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
1391 // Ensure BaseInsertOffset is larger than the offset of the instruction
1392 // following UserMI so that the loop which searches for the split point
1393 // iterates at least once.
1394 BaseInsertOffset =
1395 std::max(UserBBI.postOffset() - UPad - 8,
1396 UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
1397 // If the CP reference (i.e., UserOffset) is in the first four instructions
1398 // after the IT, this recalculated BaseInsertOffset could be in the middle of
1399 // an IT block. If it is, change BaseInsertOffset to just after the
1400 // IT block. This still keeps the CP entry in range because of the
1401 // following reasons:
1402 // 1. The initial BaseInsertOffset calculated is (UserOffset +
1403 // U.getMaxDisp() - UPad).
1404 // 2. An IT block is at most 4 instructions plus the "it" itself (18
1405 // bytes).
1406 // 3. All the relevant instructions support a much larger maximum
1407 // displacement.
1408 MachineBasicBlock::iterator I = UserMI;
1409 ++I;
1410 Register PredReg;
1411 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1412 I->getOpcode() != ARM::t2IT &&
1413 getITInstrPredicate(*I, PredReg) != ARMCC::AL;
1414 Offset += TII->getInstSizeInBytes(*I), I = std::next(I)) {
1415 BaseInsertOffset =
1416 std::max(BaseInsertOffset, Offset + TII->getInstSizeInBytes(*I) + 1);
1417 assert(I != UserMBB->end() && "Fell off end of block");
1419 LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1421 unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1422 CPEMI->getOperand(2).getImm();
1423 MachineBasicBlock::iterator MI = UserMI;
1424 ++MI;
1425 unsigned CPUIndex = CPUserIndex+1;
1426 unsigned NumCPUsers = CPUsers.size();
1427 MachineInstr *LastIT = nullptr;
1428 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1429 Offset < BaseInsertOffset;
1430 Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
1431 assert(MI != UserMBB->end() && "Fell off end of block");
1432 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
1433 CPUser &U = CPUsers[CPUIndex];
1434 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1435 // Shift insertion point by one unit of alignment so it is within reach.
1436 BaseInsertOffset -= Align.value();
1437 EndInsertOffset -= Align.value();
1439 // This is overly conservative, as we don't account for CPEMIs being
1440 // reused within the block, but it doesn't matter much. Also assume CPEs
1441 // are added in order with alignment padding. We may eventually be able
1442 // to pack the aligned CPEs better.
1443 EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1444 CPUIndex++;
1447 // Remember the last IT instruction.
1448 if (MI->getOpcode() == ARM::t2IT)
1449 LastIT = &*MI;
1452 --MI;
1454 // Avoid splitting an IT block.
1455 if (LastIT) {
1456 Register PredReg;
1457 ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
1458 if (CC != ARMCC::AL)
1459 MI = LastIT;
1462 // Avoid splitting a MOVW+MOVT pair with a relocation on Windows.
1463 // On Windows, this instruction pair is covered by a single
1464 // IMAGE_REL_ARM_MOV32T relocation which covers both instructions. If a
1465 // constant island is injected in between them, the relocation will clobber
1466 // the instruction and fail to update the MOVT instruction.
1467 // (These instructions are bundled up until right before the ConstantIslands
1468 // pass.)
1469 if (STI->isTargetWindows() && isThumb && MI->getOpcode() == ARM::t2MOVTi16 &&
1470 (MI->getOperand(2).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1471 ARMII::MO_HI16) {
1472 --MI;
1473 assert(MI->getOpcode() == ARM::t2MOVi16 &&
1474 (MI->getOperand(1).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1475 ARMII::MO_LO16);
1478 // We really must not split an IT block.
1479 #ifndef NDEBUG
1480 Register PredReg;
1481 assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL);
1482 #endif
1483 NewMBB = splitBlockBeforeInstr(&*MI);
1486 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1487 /// is out-of-range. If so, pick up the constant pool value and move it some
1488 /// place in-range. Return true if we changed any addresses (thus must run
1489 /// another pass of branch lengthening), false otherwise.
1490 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
1491 bool CloserWater) {
1492 CPUser &U = CPUsers[CPUserIndex];
1493 MachineInstr *UserMI = U.MI;
1494 MachineInstr *CPEMI = U.CPEMI;
1495 unsigned CPI = getCombinedIndex(CPEMI);
1496 unsigned Size = CPEMI->getOperand(2).getImm();
1497 // Compute this only once, it's expensive.
1498 unsigned UserOffset = getUserOffset(U);
1500 // See if the current entry is within range, or there is a clone of it
1501 // in range.
1502 int result = findInRangeCPEntry(U, UserOffset);
1503 if (result==1) return false;
1504 else if (result==2) return true;
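// Note: a result of 1 means the user already has a usable entry (or was
// retargeted to a clone without changing any addresses); 2 means the
// retargeting removed the old entry, so addresses changed and another pass
// is required; 0 means no existing entry or clone is in range.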
1506 // No existing clone of this CPE is within range.
1507 // We will be generating a new clone. Get a UID for it.
1508 unsigned ID = AFI->createPICLabelUId();
1510 // Look for water where we can place this CPE.
1511 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1512 MachineBasicBlock *NewMBB;
1513 water_iterator IP;
1514 if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
1515 LLVM_DEBUG(dbgs() << "Found water in range\n");
1516 MachineBasicBlock *WaterBB = *IP;
1518 // If the original WaterList entry was "new water" on this iteration,
1519 // propagate that to the new island. This is just keeping NewWaterList
1520 // updated to match the WaterList, which will be updated below.
1521 if (NewWaterList.erase(WaterBB))
1522 NewWaterList.insert(NewIsland);
1524 // The new CPE goes before the following block (NewMBB).
1525 NewMBB = &*++WaterBB->getIterator();
1526 } else {
1527 // No water found.
1528 LLVM_DEBUG(dbgs() << "No water found\n");
1529 createNewWater(CPUserIndex, UserOffset, NewMBB);
1531 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1532 // called while handling branches so that the water will be seen on the
1533 // next iteration for constant pools, but in this context, we don't want
1534 // it. Check for this so it will be removed from the WaterList.
1535 // Also remove any entry from NewWaterList.
1536 MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
1537 IP = find(WaterList, WaterBB);
1538 if (IP != WaterList.end())
1539 NewWaterList.erase(WaterBB);
1541 // We are adding new water. Update NewWaterList.
1542 NewWaterList.insert(NewIsland);
1544 // Always align the new block because CP entries can be smaller than 4
1545 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
1546 // be an already aligned constant pool block.
1547 const Align Alignment = isThumb ? Align(2) : Align(4);
1548 if (NewMBB->getAlignment() < Alignment)
1549 NewMBB->setAlignment(Alignment);
1551 // Remove the original WaterList entry; we want subsequent insertions in
1552 // this vicinity to go after the one we're about to insert. This
1553 // considerably reduces the number of times we have to move the same CPE
1554 // more than once and is also important to ensure the algorithm terminates.
1555 if (IP != WaterList.end())
1556 WaterList.erase(IP);
1558 // Okay, we know we can put an island before NewMBB now, do it!
1559 MF->insert(NewMBB->getIterator(), NewIsland);
1561 // Update internal data structures to account for the newly inserted MBB.
1562 updateForInsertedWaterBlock(NewIsland);
1564 // Now that we have an island to add the CPE to, clone the original CPE and
1565 // add it to the island.
1566 U.HighWaterMark = NewIsland;
1567 U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1568 .addImm(ID)
1569 .add(CPEMI->getOperand(1))
1570 .addImm(Size);
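// The cloned CONSTPOOL_ENTRY mirrors the original: operand 0 is the new
// label UID, operand 1 the constant-pool (or jump-table) index copied from
// the old entry, and operand 2 the size in bytes.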
1571 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1572 ++NumCPEs;
1574 // Decrement the old entry, and remove it if refcount becomes 0.
1575 decrementCPEReferenceCount(CPI, CPEMI);
1577 // Mark the basic block as aligned as required by the const-pool entry.
1578 NewIsland->setAlignment(getCPEAlign(U.CPEMI));
1580 // Increase the size of the island block to account for the new entry.
1581 BBUtils->adjustBBSize(NewIsland, Size);
1582 BBUtils->adjustBBOffsetsAfter(&*--NewIsland->getIterator());
1584 // Finally, change the CPI in the instruction operand to be ID.
1585 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1586 if (UserMI->getOperand(i).isCPI()) {
1587 UserMI->getOperand(i).setIndex(ID);
1588 break;
1591 LLVM_DEBUG(
1592 dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1593 << format(" offset=%#x\n",
1594 BBUtils->getBBInfo()[NewIsland->getNumber()].Offset));
1596 return true;
1599 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1600 /// sizes and offsets of impacted basic blocks.
1601 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1602 MachineBasicBlock *CPEBB = CPEMI->getParent();
1603 unsigned Size = CPEMI->getOperand(2).getImm();
1604 CPEMI->eraseFromParent();
1605 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1606 BBUtils->adjustBBSize(CPEBB, -Size);
1607 // All succeeding offsets have the current size value added in, fix this.
1608 if (CPEBB->empty()) {
1609 BBInfo[CPEBB->getNumber()].Size = 0;
1611 // This block no longer needs to be aligned.
1612 CPEBB->setAlignment(Align(1));
1613 } else {
1614 // Entries are sorted by descending alignment, so realign from the front.
1615 CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
1618 BBUtils->adjustBBOffsetsAfter(CPEBB);
1619 // An island has only one predecessor BB and one successor BB. Check if
1620 // this BB's predecessor jumps directly to this BB's successor. This
1621 // shouldn't happen currently.
1622 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1623 // FIXME: remove the empty blocks after all the work is done?
1626 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1627 /// are zero.
1628 bool ARMConstantIslands::removeUnusedCPEntries() {
1629 bool MadeChange = false;
1630 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1631 std::vector<CPEntry> &CPEs = CPEntries[i];
1632 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1633 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1634 removeDeadCPEMI(CPEs[j].CPEMI);
1635 CPEs[j].CPEMI = nullptr;
1636 MadeChange = true;
1640 return MadeChange;
1644 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1645 /// away to fit in its displacement field.
1646 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1647 MachineInstr *MI = Br.MI;
1648 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1650 // Check to see if the DestBB is already in-range.
1651 if (BBUtils->isBBInRange(MI, DestBB, Br.MaxDisp))
1652 return false;
1654 if (!Br.isCond)
1655 return fixupUnconditionalBr(Br);
1656 return fixupConditionalBr(Br);
1659 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1660 /// too far away to fit in its displacement field. If the LR register has been
1661 /// spilled in the epilogue, then we can use BL to implement a far jump.
1662 /// Otherwise, add an intermediate branch instruction to a branch.
1663 bool
1664 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1665 MachineInstr *MI = Br.MI;
1666 MachineBasicBlock *MBB = MI->getParent();
1667 if (!isThumb1)
1668 llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1670 if (!AFI->isLRSpilled())
1671 report_fatal_error("underestimated function size");
1673 // Use BL to implement far jump.
1674 Br.MaxDisp = (1 << 21) * 2;
1675 MI->setDesc(TII->get(ARM::tBfar));
1676 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1677 BBInfo[MBB->getNumber()].Size += 2;
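// tBfar expands to a 32-bit BL, giving roughly +/-4MB of reach
// ((1 << 21) * 2 bytes) and costing two bytes more than the 16-bit tB it
// replaces, hence the size adjustment above.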
1678 BBUtils->adjustBBOffsetsAfter(MBB);
1679 ++NumUBrFixed;
1681 LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
1683 return true;
1686 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1687 /// far away to fit in its displacement field. It is converted to an inverse
1688 /// conditional branch + an unconditional branch to the destination.
1689 bool
1690 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1691 MachineInstr *MI = Br.MI;
1692 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1694 // Add an unconditional branch to the destination and invert the branch
1695 // condition to jump over it:
1696 // blt L1
1697 // =>
1698 // bge L2
1699 // b L1
1700 // L2:
1701 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1702 CC = ARMCC::getOppositeCondition(CC);
1703 Register CCReg = MI->getOperand(2).getReg();
1705 // If the branch is at the end of its MBB and that has a fall-through block,
1706 // direct the updated conditional branch to the fall-through block. Otherwise,
1707 // split the MBB before the next instruction.
1708 MachineBasicBlock *MBB = MI->getParent();
1709 MachineInstr *BMI = &MBB->back();
1710 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1712 ++NumCBrFixed;
1713 if (BMI != MI) {
1714 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1715 BMI->getOpcode() == Br.UncondBr) {
1716 // Last MI in the BB is an unconditional branch. Can we simply invert the
1717 // condition and swap destinations:
1718 // beq L1
1719 // b L2
1720 // =>
1721 // bne L2
1722 // b L1
1723 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1724 if (BBUtils->isBBInRange(MI, NewDest, Br.MaxDisp)) {
1725 LLVM_DEBUG(
1726 dbgs() << " Invert Bcc condition and swap its destination with "
1727 << *BMI);
1728 BMI->getOperand(0).setMBB(DestBB);
1729 MI->getOperand(0).setMBB(NewDest);
1730 MI->getOperand(1).setImm(CC);
1731 return true;
1736 if (NeedSplit) {
1737 splitBlockBeforeInstr(MI);
1738 // No need for the branch to the next block. We're adding an unconditional
1739 // branch to the destination.
1740 int delta = TII->getInstSizeInBytes(MBB->back());
1741 BBUtils->adjustBBSize(MBB, -delta);
1742 MBB->back().eraseFromParent();
1744 // The conditional successor will be swapped between the BBs after this, so
1745 // update CFG.
1746 MBB->addSuccessor(DestBB);
1747 std::next(MBB->getIterator())->removeSuccessor(DestBB);
1749 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1751 MachineBasicBlock *NextBB = &*++MBB->getIterator();
1753 LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
1754 << " also invert condition and change dest. to "
1755 << printMBBReference(*NextBB) << "\n");
1757 // Insert a new conditional branch and a new unconditional branch.
1758 // Also update the ImmBranch and add a new entry for the new branch.
1759 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1760 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1761 Br.MI = &MBB->back();
1762 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1763 if (isThumb)
1764 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
1765 .addMBB(DestBB)
1766 .add(predOps(ARMCC::AL));
1767 else
1768 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1769 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1770 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1771 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1773 // Remove the old conditional branch. It may or may not still be in MBB.
1774 BBUtils->adjustBBSize(MI->getParent(), -TII->getInstSizeInBytes(*MI));
1775 MI->eraseFromParent();
1776 BBUtils->adjustBBOffsetsAfter(MBB);
1777 return true;
1780 bool ARMConstantIslands::optimizeThumb2Instructions() {
1781 bool MadeChange = false;
1783 // Shrink ADR and LDR from constantpool.
1784 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1785 CPUser &U = CPUsers[i];
1786 unsigned Opcode = U.MI->getOpcode();
1787 unsigned NewOpc = 0;
1788 unsigned Scale = 1;
1789 unsigned Bits = 0;
1790 switch (Opcode) {
1791 default: break;
1792 case ARM::t2LEApcrel:
1793 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1794 NewOpc = ARM::tLEApcrel;
1795 Bits = 8;
1796 Scale = 4;
1798 break;
1799 case ARM::t2LDRpci:
1800 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1801 NewOpc = ARM::tLDRpci;
1802 Bits = 8;
1803 Scale = 4;
1805 break;
1808 if (!NewOpc)
1809 continue;
1811 unsigned UserOffset = getUserOffset(U);
1812 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
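// With Bits = 8 and Scale = 4 this is 255 * 4 = 1020 bytes, the reach of
// the 16-bit tLDRpci / tLEApcrel encodings.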
1814 // Be conservative with inline asm.
1815 if (!U.KnownAlignment)
1816 MaxOffs -= 2;
1818 // FIXME: Check if offset is multiple of scale if scale is not 4.
1819 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1820 LLVM_DEBUG(dbgs() << "Shrink: " << *U.MI);
1821 U.MI->setDesc(TII->get(NewOpc));
1822 MachineBasicBlock *MBB = U.MI->getParent();
1823 BBUtils->adjustBBSize(MBB, -2);
1824 BBUtils->adjustBBOffsetsAfter(MBB);
1825 ++NumT2CPShrunk;
1826 MadeChange = true;
1830 return MadeChange;
1834 bool ARMConstantIslands::optimizeThumb2Branches() {
1836 auto TryShrinkBranch = [this](ImmBranch &Br) {
1837 unsigned Opcode = Br.MI->getOpcode();
1838 unsigned NewOpc = 0;
1839 unsigned Scale = 1;
1840 unsigned Bits = 0;
1841 switch (Opcode) {
1842 default: break;
1843 case ARM::t2B:
1844 NewOpc = ARM::tB;
1845 Bits = 11;
1846 Scale = 2;
1847 break;
1848 case ARM::t2Bcc:
1849 NewOpc = ARM::tBcc;
1850 Bits = 8;
1851 Scale = 2;
1852 break;
1854 if (NewOpc) {
1855 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
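// For t2B -> tB this is ((1 << 10) - 1) * 2 = 2046 bytes; for t2Bcc ->
// tBcc it is ((1 << 7) - 1) * 2 = 254 bytes, the signed displacement
// ranges of the 16-bit branch encodings.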
1856 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1857 if (BBUtils->isBBInRange(Br.MI, DestBB, MaxOffs)) {
1858 LLVM_DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1859 Br.MI->setDesc(TII->get(NewOpc));
1860 MachineBasicBlock *MBB = Br.MI->getParent();
1861 BBUtils->adjustBBSize(MBB, -2);
1862 BBUtils->adjustBBOffsetsAfter(MBB);
1863 ++NumT2BrShrunk;
1864 return true;
1867 return false;
1870 struct ImmCompare {
1871 MachineInstr* MI = nullptr;
1872 unsigned NewOpc = 0;
1875 auto FindCmpForCBZ = [this](ImmBranch &Br, ImmCompare &ImmCmp,
1876 MachineBasicBlock *DestBB) {
1877 ImmCmp.MI = nullptr;
1878 ImmCmp.NewOpc = 0;
1880 // If the conditional branch doesn't kill CPSR, then CPSR can be live out,
1881 // so this transformation is not safe.
1882 if (!Br.MI->killsRegister(ARM::CPSR))
1883 return false;
1885 Register PredReg;
1886 unsigned NewOpc = 0;
1887 ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
1888 if (Pred == ARMCC::EQ)
1889 NewOpc = ARM::tCBZ;
1890 else if (Pred == ARMCC::NE)
1891 NewOpc = ARM::tCBNZ;
1892 else
1893 return false;
1895 // Check that the distance is within 126 bytes. Subtract 2 from the
1896 // starting offset because the cmp will be eliminated.
1897 unsigned BrOffset = BBUtils->getOffsetOf(Br.MI) + 4 - 2;
1898 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1899 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1900 if (BrOffset >= DestOffset || (DestOffset - BrOffset) > 126)
1901 return false;
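// CBZ/CBNZ encode a zero-extended 6-bit immediate shifted left by one, so
// they can only branch forward by 0-126 bytes; hence both checks above.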
1903 // Search backwards to find a tCMPi8
1904 auto *TRI = STI->getRegisterInfo();
1905 MachineInstr *CmpMI = findCMPToFoldIntoCBZ(Br.MI, TRI);
1906 if (!CmpMI || CmpMI->getOpcode() != ARM::tCMPi8)
1907 return false;
1909 ImmCmp.MI = CmpMI;
1910 ImmCmp.NewOpc = NewOpc;
1911 return true;
1914 auto TryConvertToLE = [this](ImmBranch &Br, ImmCompare &Cmp) {
1915 if (Br.MI->getOpcode() != ARM::t2Bcc || !STI->hasLOB() ||
1916 STI->hasMinSize())
1917 return false;
1919 MachineBasicBlock *MBB = Br.MI->getParent();
1920 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1921 if (BBUtils->getOffsetOf(MBB) < BBUtils->getOffsetOf(DestBB) ||
1922 !BBUtils->isBBInRange(Br.MI, DestBB, 4094))
1923 return false;
1925 if (!DT->dominates(DestBB, MBB))
1926 return false;
1928 // We queried for the CBN?Z opcode based upon the 'ExitBB', the opposite
1929 // target of Br. So now we need to reverse the condition.
1930 Cmp.NewOpc = Cmp.NewOpc == ARM::tCBZ ? ARM::tCBNZ : ARM::tCBZ;
1932 MachineInstrBuilder MIB = BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(),
1933 TII->get(ARM::t2LE));
1934 // Swapped a t2Bcc for a t2LE, so no need to update the size of the block.
1935 MIB.add(Br.MI->getOperand(0));
1936 Br.MI->eraseFromParent();
1937 Br.MI = MIB;
1938 ++NumLEInserted;
1939 return true;
1942 bool MadeChange = false;
1944 // The order in which branches appear in ImmBranches is approximately their
1945 // order within the function body. By visiting later branches first, we reduce
1946 // the distance between earlier forward branches and their targets, making it
1947 // more likely that the cbn?z optimization, which can only apply to forward
1948 // branches, will succeed.
1949 for (ImmBranch &Br : reverse(ImmBranches)) {
1950 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1951 MachineBasicBlock *MBB = Br.MI->getParent();
1952 MachineBasicBlock *ExitBB = &MBB->back() == Br.MI ?
1953 MBB->getFallThrough() :
1954 MBB->back().getOperand(0).getMBB();
1956 ImmCompare Cmp;
1957 if (FindCmpForCBZ(Br, Cmp, ExitBB) && TryConvertToLE(Br, Cmp)) {
1958 DestBB = ExitBB;
1959 MadeChange = true;
1960 } else {
1961 FindCmpForCBZ(Br, Cmp, DestBB);
1962 MadeChange |= TryShrinkBranch(Br);
1965 unsigned Opcode = Br.MI->getOpcode();
1966 if ((Opcode != ARM::tBcc && Opcode != ARM::t2LE) || !Cmp.NewOpc)
1967 continue;
1969 Register Reg = Cmp.MI->getOperand(0).getReg();
1971 // Check for Kill flags on Reg. If they are present remove them and set kill
1972 // on the new CBZ.
1973 auto *TRI = STI->getRegisterInfo();
1974 MachineBasicBlock::iterator KillMI = Br.MI;
1975 bool RegKilled = false;
1976 do {
1977 --KillMI;
1978 if (KillMI->killsRegister(Reg, TRI)) {
1979 KillMI->clearRegisterKills(Reg, TRI);
1980 RegKilled = true;
1981 break;
1983 } while (KillMI != Cmp.MI);
1985 // Create the new CBZ/CBNZ
1986 LLVM_DEBUG(dbgs() << "Fold: " << *Cmp.MI << " and: " << *Br.MI);
1987 MachineInstr *NewBR =
1988 BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(), TII->get(Cmp.NewOpc))
1989 .addReg(Reg, getKillRegState(RegKilled) |
1990 getRegState(Cmp.MI->getOperand(0)))
1991 .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
1993 Cmp.MI->eraseFromParent();
1995 if (Br.MI->getOpcode() == ARM::tBcc) {
1996 Br.MI->eraseFromParent();
1997 Br.MI = NewBR;
1998 BBUtils->adjustBBSize(MBB, -2);
1999 } else if (MBB->back().getOpcode() != ARM::t2LE) {
2000 // An LE has been generated, but it's not the terminator - that is an
2001 // unconditional branch. However, the logic has now been reversed, with the
2002 // CBN?Z being the conditional branch and the LE being the unconditional
2003 // branch, so we can remove the redundant unconditional branch at the end
2004 // of the block.
2005 MachineInstr *LastMI = &MBB->back();
2006 BBUtils->adjustBBSize(MBB, -LastMI->getDesc().getSize());
2007 LastMI->eraseFromParent();
2009 BBUtils->adjustBBOffsetsAfter(MBB);
2010 ++NumCBZ;
2011 MadeChange = true;
2014 return MadeChange;
2017 static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
2018 unsigned BaseReg) {
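// t2ADDrs is an ADD whose second source operand is a register shifted by an
// immediate; here it is expected to compute EntryReg = BaseReg + scaled
// jump-table index.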
2019 if (I.getOpcode() != ARM::t2ADDrs)
2020 return false;
2022 if (I.getOperand(0).getReg() != EntryReg)
2023 return false;
2025 if (I.getOperand(1).getReg() != BaseReg)
2026 return false;
2028 // FIXME: what about CC and IdxReg?
2029 return true;
2032 /// While trying to form a TBB/TBH instruction, we may (if the table
2033 /// doesn't immediately follow the BR_JT) need access to the start of the
2034 /// jump-table. We know one instruction that produces such a register; this
2035 /// function works out whether that definition can be preserved to the BR_JT,
2036 /// possibly by removing an intervening addition (which is usually needed to
2037 /// calculate the actual entry to jump to).
2038 bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
2039 MachineInstr *LEAMI,
2040 unsigned &DeadSize,
2041 bool &CanDeleteLEA,
2042 bool &BaseRegKill) {
2043 if (JumpMI->getParent() != LEAMI->getParent())
2044 return false;
2046 // Now we hope that we have at least these instructions in the basic block:
2047 // BaseReg = t2LEA ...
2048 // [...]
2049 // EntryReg = t2ADDrs BaseReg, ...
2050 // [...]
2051 // t2BR_JT EntryReg
2053 // We have to be very conservative about what we recognise here though. The
2054 // main perturbing factors to watch out for are:
2055 // + Spills at any point in the chain: not direct problems but we would
2056 // expect a blocking Def of the spilled register so in practice what we
2057 // can do is limited.
2058 // + EntryReg == BaseReg: this is the one situation we should allow a Def
2059 // of BaseReg, but only if the t2ADDrs can be removed.
2060 // + Some instruction other than t2ADDrs computing the entry. Not seen in
2061 // the wild, but we should be careful.
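// The single scan below walks from LEAMI towards JumpMI, remembering a
// t2ADDrs that recomputes EntryReg from BaseReg and bailing out on any
// other def of BaseReg; plain uses of BaseReg only prevent deleting the LEA.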
2062 Register EntryReg = JumpMI->getOperand(0).getReg();
2063 Register BaseReg = LEAMI->getOperand(0).getReg();
2065 CanDeleteLEA = true;
2066 BaseRegKill = false;
2067 MachineInstr *RemovableAdd = nullptr;
2068 MachineBasicBlock::iterator I(LEAMI);
2069 for (++I; &*I != JumpMI; ++I) {
2070 if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
2071 RemovableAdd = &*I;
2072 break;
2075 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2076 const MachineOperand &MO = I->getOperand(K);
2077 if (!MO.isReg() || !MO.getReg())
2078 continue;
2079 if (MO.isDef() && MO.getReg() == BaseReg)
2080 return false;
2081 if (MO.isUse() && MO.getReg() == BaseReg) {
2082 BaseRegKill = BaseRegKill || MO.isKill();
2083 CanDeleteLEA = false;
2088 if (!RemovableAdd)
2089 return true;
2091 // Check the add really is removable, and that nothing else in the block
2092 // clobbers BaseReg.
2093 for (++I; &*I != JumpMI; ++I) {
2094 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2095 const MachineOperand &MO = I->getOperand(K);
2096 if (!MO.isReg() || !MO.getReg())
2097 continue;
2098 if (MO.isDef() && MO.getReg() == BaseReg)
2099 return false;
2100 if (MO.isUse() && MO.getReg() == EntryReg)
2101 RemovableAdd = nullptr;
2105 if (RemovableAdd) {
2106 RemovableAdd->eraseFromParent();
2107 DeadSize += isThumb2 ? 4 : 2;
2108 } else if (BaseReg == EntryReg) {
2109 // The add wasn't removable, but clobbered the base for the TBB. So we can't
2110 // preserve it.
2111 return false;
2114 // We reached the end of the block without seeing another definition of
2115 // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2116 // used in the TBB/TBH if necessary.
2117 return true;
2120 /// Returns whether CPEMI is the first instruction in the block
2121 /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2122 /// we can switch the first register to PC and usually remove the address
2123 /// calculation that preceded it.
2124 static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2125 MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
2126 MachineFunction *MF = MBB->getParent();
2127 ++MBB;
2129 return MBB != MF->end() && !MBB->empty() && &*MBB->begin() == CPEMI;
2132 static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
2133 MachineInstr *JumpMI,
2134 unsigned &DeadSize) {
2135 // Remove a dead add between the LEA and JT, which used to compute EntryReg,
2136 // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
2137 // and is not clobbered / used.
2138 MachineInstr *RemovableAdd = nullptr;
2139 Register EntryReg = JumpMI->getOperand(0).getReg();
2141 // Find the last ADD to set EntryReg
2142 MachineBasicBlock::iterator I(LEAMI);
2143 for (++I; &*I != JumpMI; ++I) {
2144 if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
2145 RemovableAdd = &*I;
2148 if (!RemovableAdd)
2149 return;
2151 // Ensure EntryReg is not clobbered or used.
2152 MachineBasicBlock::iterator J(RemovableAdd);
2153 for (++J; &*J != JumpMI; ++J) {
2154 for (unsigned K = 0, E = J->getNumOperands(); K != E; ++K) {
2155 const MachineOperand &MO = J->getOperand(K);
2156 if (!MO.isReg() || !MO.getReg())
2157 continue;
2158 if (MO.isDef() && MO.getReg() == EntryReg)
2159 return;
2160 if (MO.isUse() && MO.getReg() == EntryReg)
2161 return;
2165 LLVM_DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
2166 RemovableAdd->eraseFromParent();
2167 DeadSize += 4;
2170 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
2171 /// jumptables when it's possible.
2172 bool ARMConstantIslands::optimizeThumb2JumpTables() {
2173 bool MadeChange = false;
2175 // FIXME: After the tables are shrunk, can we get rid of some of the
2176 // constantpool tables?
2177 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2178 if (!MJTI) return false;
2180 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2181 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2182 MachineInstr *MI = T2JumpTables[i];
2183 const MCInstrDesc &MCID = MI->getDesc();
2184 unsigned NumOps = MCID.getNumOperands();
2185 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2186 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2187 unsigned JTI = JTOP.getIndex();
2188 assert(JTI < JT.size());
2190 bool ByteOk = true;
2191 bool HalfWordOk = true;
2192 unsigned JTOffset = BBUtils->getOffsetOf(MI) + 4;
2193 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2194 BBInfoVector &BBInfo = BBUtils->getBBInfo();
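// TBB entries are byte offsets scaled by 2 (at most 255 * 2 = 510 bytes
// past the table); TBH entries are halfword offsets scaled by 2 (at most
// 65535 * 2 = 131070 bytes). Both are unsigned, so only forward targets
// are usable.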
2195 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2196 MachineBasicBlock *MBB = JTBBs[j];
2197 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
2198 // Negative offset is not ok. FIXME: We should change BB layout to make
2199 // sure all the branches are forward.
2200 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
2201 ByteOk = false;
2202 unsigned TBHLimit = ((1<<16)-1)*2;
2203 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
2204 HalfWordOk = false;
2205 if (!ByteOk && !HalfWordOk)
2206 break;
2209 if (!ByteOk && !HalfWordOk)
2210 continue;
2212 CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2213 MachineBasicBlock *MBB = MI->getParent();
2214 if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2215 continue;
2217 unsigned DeadSize = 0;
2218 bool CanDeleteLEA = false;
2219 bool BaseRegKill = false;
2221 unsigned IdxReg = ~0U;
2222 bool IdxRegKill = true;
2223 if (isThumb2) {
2224 IdxReg = MI->getOperand(1).getReg();
2225 IdxRegKill = MI->getOperand(1).isKill();
2227 bool PreservedBaseReg =
2228 preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2229 if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2230 continue;
2231 } else {
2232 // We're in thumb-1 mode, so we must have something like:
2233 // %idx = tLSLri %idx, 2
2234 // %base = tLEApcrelJT
2235 // %t = tLDRr %base, %idx
2236 Register BaseReg = User.MI->getOperand(0).getReg();
2238 if (User.MI->getIterator() == User.MI->getParent()->begin())
2239 continue;
2240 MachineInstr *Shift = User.MI->getPrevNode();
2241 if (Shift->getOpcode() != ARM::tLSLri ||
2242 Shift->getOperand(3).getImm() != 2 ||
2243 !Shift->getOperand(2).isKill())
2244 continue;
2245 IdxReg = Shift->getOperand(2).getReg();
2246 Register ShiftedIdxReg = Shift->getOperand(0).getReg();
2248 // It's important that IdxReg is live until the actual TBB/TBH. Most of
2249 // the range is checked later, but the LEA might still clobber it and not
2250 // actually get removed.
2251 if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
2252 continue;
2254 MachineInstr *Load = User.MI->getNextNode();
2255 if (Load->getOpcode() != ARM::tLDRr)
2256 continue;
2257 if (Load->getOperand(1).getReg() != BaseReg ||
2258 Load->getOperand(2).getReg() != ShiftedIdxReg ||
2259 !Load->getOperand(2).isKill())
2260 continue;
2262 // If we're in PIC mode, there should be another ADD following.
2263 auto *TRI = STI->getRegisterInfo();
2265 // %base cannot be redefined after the load as it will appear before
2266 // TBB/TBH like:
2267 // %base =
2268 // %base =
2269 // tBB %base, %idx
2270 if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
2271 continue;
2273 if (isPositionIndependentOrROPI) {
2274 MachineInstr *Add = Load->getNextNode();
2275 if (Add->getOpcode() != ARM::tADDrr ||
2276 Add->getOperand(2).getReg() != BaseReg ||
2277 Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
2278 !Add->getOperand(3).isKill())
2279 continue;
2280 if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
2281 continue;
2282 if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
2283 // IdxReg gets redefined in the middle of the sequence.
2284 continue;
2285 Add->eraseFromParent();
2286 DeadSize += 2;
2287 } else {
2288 if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
2289 continue;
2290 if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
2291 // IdxReg gets redefined in the middle of the sequence.
2292 continue;
2295 // Now safe to delete the load and lsl. The LEA will be removed later.
2296 CanDeleteLEA = true;
2297 Shift->eraseFromParent();
2298 Load->eraseFromParent();
2299 DeadSize += 4;
2302 LLVM_DEBUG(dbgs() << "Shrink JT: " << *MI);
2303 MachineInstr *CPEMI = User.CPEMI;
2304 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2305 if (!isThumb2)
2306 Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;
2308 MachineBasicBlock::iterator MI_JT = MI;
2309 MachineInstr *NewJTMI =
2310 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
2311 .addReg(User.MI->getOperand(0).getReg(),
2312 getKillRegState(BaseRegKill))
2313 .addReg(IdxReg, getKillRegState(IdxRegKill))
2314 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2315 .addImm(CPEMI->getOperand(0).getImm());
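// Operands of the new table branch: the table base register, the index
// register, the jump-table index, and the label UID taken from operand 0
// of the existing CPEMI.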
2316 LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
2318 unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2319 CPEMI->setDesc(TII->get(JTOpc));
2321 if (jumpTableFollowsTB(MI, User.CPEMI)) {
2322 NewJTMI->getOperand(0).setReg(ARM::PC);
2323 NewJTMI->getOperand(0).setIsKill(false);
2325 if (CanDeleteLEA) {
2326 if (isThumb2)
2327 RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);
2329 User.MI->eraseFromParent();
2330 DeadSize += isThumb2 ? 4 : 2;
2332 // The LEA was eliminated, the TBB instruction becomes the only new user
2333 // of the jump table.
2334 User.MI = NewJTMI;
2335 User.MaxDisp = 4;
2336 User.NegOk = false;
2337 User.IsSoImm = false;
2338 User.KnownAlignment = false;
2339 } else {
2340 // The LEA couldn't be eliminated, so we must add another CPUser to
2341 // record the TBB or TBH use.
2342 int CPEntryIdx = JumpTableEntryIndices[JTI];
2343 auto &CPEs = CPEntries[CPEntryIdx];
2344 auto Entry =
2345 find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
2346 ++Entry->RefCount;
2347 CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2351 unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
2352 unsigned OrigSize = TII->getInstSizeInBytes(*MI);
2353 MI->eraseFromParent();
2355 int Delta = OrigSize - NewSize + DeadSize;
2356 BBInfo[MBB->getNumber()].Size -= Delta;
2357 BBUtils->adjustBBOffsetsAfter(MBB);
2359 ++NumTBs;
2360 MadeChange = true;
2363 return MadeChange;
2366 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
2367 /// jump tables always branch forwards, since that's what tbb and tbh need.
2368 bool ARMConstantIslands::reorderThumb2JumpTables() {
2369 bool MadeChange = false;
2371 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2372 if (!MJTI) return false;
2374 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2375 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2376 MachineInstr *MI = T2JumpTables[i];
2377 const MCInstrDesc &MCID = MI->getDesc();
2378 unsigned NumOps = MCID.getNumOperands();
2379 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2380 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2381 unsigned JTI = JTOP.getIndex();
2382 assert(JTI < JT.size());
2384 // We prefer that the target blocks of the jump table come after the jump
2385 // instruction so we can use TB[BH]. Loop through the target blocks
2386 // and try to adjust them so that this is the case.
2387 int JTNumber = MI->getParent()->getNumber();
2388 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2389 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2390 MachineBasicBlock *MBB = JTBBs[j];
2391 int DTNumber = MBB->getNumber();
2393 if (DTNumber < JTNumber) {
2394 // The destination precedes the switch. Try to move the block forward
2395 // so we have a positive offset.
2396 MachineBasicBlock *NewBB =
2397 adjustJTTargetBlockForward(MBB, MI->getParent());
2398 if (NewBB)
2399 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2400 MadeChange = true;
2405 return MadeChange;
2408 MachineBasicBlock *ARMConstantIslands::
2409 adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2410 // If the destination block is terminated by an unconditional branch,
2411 // try to move it; otherwise, create a new block following the jump
2412 // table that branches back to the actual target. This is a very simple
2413 // heuristic. FIXME: We can definitely improve it.
2414 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2415 SmallVector<MachineOperand, 4> Cond;
2416 SmallVector<MachineOperand, 4> CondPrior;
2417 MachineFunction::iterator BBi = BB->getIterator();
2418 MachineFunction::iterator OldPrior = std::prev(BBi);
2419 MachineFunction::iterator OldNext = std::next(BBi);
2421 // If the block terminator isn't analyzable, don't try to move the block
2422 bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
2424 // If the block ends in an unconditional branch, move it. The prior block
2425 // has to have an analyzable terminator for us to move this one. Be paranoid
2426 // and make sure we're not trying to move the entry block of the function.
2427 if (!B && Cond.empty() && BB != &MF->front() &&
2428 !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2429 BB->moveAfter(JTBB);
2430 OldPrior->updateTerminator(BB);
2431 BB->updateTerminator(OldNext != MF->end() ? &*OldNext : nullptr);
2432 // Update numbering to account for the block being moved.
2433 MF->RenumberBlocks();
2434 ++NumJTMoved;
2435 return nullptr;
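// Returning null tells the caller the block was moved in place, so the
// jump table still targets the right MBB and no replacement is needed.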
2438 // Create a new MBB for the code after the jump BB.
2439 MachineBasicBlock *NewBB =
2440 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2441 MachineFunction::iterator MBBI = ++JTBB->getIterator();
2442 MF->insert(MBBI, NewBB);
2444 // Copy live-in information to new block.
2445 for (const MachineBasicBlock::RegisterMaskPair &RegMaskPair : BB->liveins())
2446 NewBB->addLiveIn(RegMaskPair);
2448 // Add an unconditional branch from NewBB to BB.
2449 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2450 // correspond directly to anything in the source.
2451 if (isThumb2)
2452 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
2453 .addMBB(BB)
2454 .add(predOps(ARMCC::AL));
2455 else
2456 BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
2457 .addMBB(BB)
2458 .add(predOps(ARMCC::AL));
2460 // Update internal data structures to account for the newly inserted MBB.
2461 MF->RenumberBlocks(NewBB);
2463 // Update the CFG.
2464 NewBB->addSuccessor(BB);
2465 JTBB->replaceSuccessor(BB, NewBB);
2467 ++NumJTInserted;
2468 return NewBB;
2471 /// createARMConstantIslandPass - returns an instance of the constpool
2472 /// island pass.
2473 FunctionPass *llvm::createARMConstantIslandPass() {
2474 return new ARMConstantIslands();
2477 INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
2478 false, false)