1 //===- ARMConstantIslandPass.cpp - ARM constant islands -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that splits the constant pool up into 'islands'
10 // which are scattered throughout the function. This is required due to the
11 // limited PC-relative displacements that ARM has.
13 //===----------------------------------------------------------------------===//
15 #include "ARM.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBasicBlockInfo.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMSubtarget.h"
20 #include "MCTargetDesc/ARMBaseInfo.h"
21 #include "Thumb2InstrInfo.h"
22 #include "Utils/ARMBaseInfo.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SmallSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/ADT/StringRef.h"
29 #include "llvm/CodeGen/LivePhysRegs.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineInstr.h"
35 #include "llvm/CodeGen/MachineJumpTableInfo.h"
36 #include "llvm/CodeGen/MachineOperand.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/Config/llvm-config.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DebugLoc.h"
41 #include "llvm/MC/MCInstrDesc.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Support/Compiler.h"
45 #include "llvm/Support/Debug.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Support/Format.h"
48 #include "llvm/Support/MathExtras.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <cstdint>
53 #include <iterator>
54 #include <utility>
55 #include <vector>
57 using namespace llvm;
59 #define DEBUG_TYPE "arm-cp-islands"
61 #define ARM_CP_ISLANDS_OPT_NAME \
62 "ARM constant island placement and branch shortening pass"
63 STATISTIC(NumCPEs, "Number of constpool entries");
64 STATISTIC(NumSplit, "Number of uncond branches inserted");
65 STATISTIC(NumCBrFixed, "Number of cond branches fixed");
66 STATISTIC(NumUBrFixed, "Number of uncond branches fixed");
67 STATISTIC(NumTBs, "Number of table branches generated");
68 STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
69 STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
70 STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
71 STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
72 STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
74 static cl::opt<bool>
75 AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
76 cl::desc("Adjust basic block layout to better use TB[BH]"));
78 static cl::opt<unsigned>
79 CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
80 cl::desc("The max number of iteration for converge"));
82 static cl::opt<bool> SynthesizeThumb1TBB(
83 "arm-synthesize-thumb-1-tbb", cl::Hidden, cl::init(true),
84 cl::desc("Use compressed jump tables in Thumb-1 by synthesizing an "
85 "equivalent to the TBB/TBH instructions"));
87 namespace {
89 /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
90 /// requires constant pool entries to be scattered among the instructions
91 /// inside a function. To do this, it completely ignores the normal LLVM
92 /// constant pool; instead, it places constants wherever it feels like with
93 /// special instructions.
94 ///
95 /// The terminology used in this pass includes:
96 /// Islands - Clumps of constants placed in the function.
97 /// Water - Potential places where an island could be formed.
98 /// CPE - A constant pool entry that has been placed somewhere, which
99 /// tracks a list of users.
100 class ARMConstantIslands : public MachineFunctionPass {
101 std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;
103 /// WaterList - A sorted list of basic blocks where islands could be placed
104 /// (i.e. blocks that don't fall through to the following block, due
105 /// to a return, unreachable, or unconditional branch).
106 std::vector<MachineBasicBlock*> WaterList;
108 /// NewWaterList - The subset of WaterList that was created since the
109 /// previous iteration by inserting unconditional branches.
110 SmallSet<MachineBasicBlock*, 4> NewWaterList;
112 using water_iterator = std::vector<MachineBasicBlock *>::iterator;
114 /// CPUser - One user of a constant pool, keeping the machine instruction
115 /// pointer, the constant pool being referenced, and the max displacement
116 /// allowed from the instruction to the CP. The HighWaterMark records the
117 /// highest basic block where a new CPEntry can be placed. To ensure this
118 /// pass terminates, the CP entries are initially placed at the end of the
119 /// function and then move monotonically to lower addresses. The
120 /// exception to this rule is when the current CP entry for a particular
121 /// CPUser is out of range, but there is another CP entry for the same
122 /// constant value in range. We want to use the existing in-range CP
123 /// entry, but if it later moves out of range, the search for new water
124 /// should resume where it left off. The HighWaterMark is used to record
125 /// that point.
126 struct CPUser {
127 MachineInstr *MI;
128 MachineInstr *CPEMI;
129 MachineBasicBlock *HighWaterMark;
130 unsigned MaxDisp;
131 bool NegOk;
132 bool IsSoImm;
133 bool KnownAlignment = false;
135 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
136 bool neg, bool soimm)
137 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm) {
138 HighWaterMark = CPEMI->getParent();
141 /// getMaxDisp - Returns the maximum displacement supported by MI.
142 /// Correct for unknown alignment.
143 /// Conservatively subtract 2 bytes to handle weird alignment effects.
144 unsigned getMaxDisp() const {
145 return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
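// Illustrative note (hypothetical numbers, not part of the upstream source):
// for a Thumb tLDRpci user the encoded range is 8 bits scaled by 4, so
// MaxDisp = 255 * 4 = 1020 bytes. With KnownAlignment the usable range is
// 1020 - 2 = 1018 bytes; with unknown alignment it is (1020 - 2) - 2 = 1016,
// absorbing up to 2 bytes of alignment uncertainty plus the safety margin.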
149 /// CPUsers - Keep track of all of the machine instructions that use various
150 /// constant pools and their max displacement.
151 std::vector<CPUser> CPUsers;
153 /// CPEntry - One per constant pool entry, keeping the machine instruction
154 /// pointer, the constpool index, and the number of CPUser's which
155 /// reference this entry.
156 struct CPEntry {
157 MachineInstr *CPEMI;
158 unsigned CPI;
159 unsigned RefCount;
161 CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
162 : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
165 /// CPEntries - Keep track of all of the constant pool entry machine
166 /// instructions. For each original constpool index (i.e. those that existed
167 /// upon entry to this pass), it keeps a vector of entries. Original
168 /// elements are cloned as we go along; the clones are put in the vector of
169 /// the original element, but have distinct CPIs.
171 /// The first half of CPEntries contains generic constants, the second half
172 /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
173 /// which vector it will be in here.
174 std::vector<std::vector<CPEntry>> CPEntries;
176 /// Maps a JT index to the offset in CPEntries containing copies of that
177 /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
178 DenseMap<int, int> JumpTableEntryIndices;
180 /// Maps a JT index to the LEA that actually uses the index to calculate its
181 /// base address.
182 DenseMap<int, int> JumpTableUserIndices;
184 /// ImmBranch - One per immediate branch, keeping the machine instruction
185 /// pointer, conditional or unconditional, the max displacement,
186 /// and (if isCond is true) the corresponding unconditional branch
187 /// opcode.
188 struct ImmBranch {
189 MachineInstr *MI;
190 unsigned MaxDisp : 31;
191 bool isCond : 1;
192 unsigned UncondBr;
194 ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
195 : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
198 /// ImmBranches - Keep track of all the immediate branch instructions.
199 std::vector<ImmBranch> ImmBranches;
201 /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
202 SmallVector<MachineInstr*, 4> PushPopMIs;
204 /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
205 SmallVector<MachineInstr*, 4> T2JumpTables;
207 /// HasFarJump - True if any far jump instruction has been emitted during
208 /// the branch fix up pass.
209 bool HasFarJump;
211 MachineFunction *MF;
212 MachineConstantPool *MCP;
213 const ARMBaseInstrInfo *TII;
214 const ARMSubtarget *STI;
215 ARMFunctionInfo *AFI;
216 bool isThumb;
217 bool isThumb1;
218 bool isThumb2;
219 bool isPositionIndependentOrROPI;
221 public:
222 static char ID;
224 ARMConstantIslands() : MachineFunctionPass(ID) {}
226 bool runOnMachineFunction(MachineFunction &MF) override;
228 MachineFunctionProperties getRequiredProperties() const override {
229 return MachineFunctionProperties().set(
230 MachineFunctionProperties::Property::NoVRegs);
233 StringRef getPassName() const override {
234 return ARM_CP_ISLANDS_OPT_NAME;
237 private:
238 void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
239 void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
240 bool BBHasFallthrough(MachineBasicBlock *MBB);
241 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
242 unsigned getCPELogAlign(const MachineInstr *CPEMI);
243 void scanFunctionJumpTables();
244 void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
245 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
246 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
247 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
248 unsigned getCombinedIndex(const MachineInstr *CPEMI);
249 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
250 bool findAvailableWater(CPUser&U, unsigned UserOffset,
251 water_iterator &WaterIter, bool CloserWater);
252 void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
253 MachineBasicBlock *&NewMBB);
254 bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
255 void removeDeadCPEMI(MachineInstr *CPEMI);
256 bool removeUnusedCPEntries();
257 bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
258 MachineInstr *CPEMI, unsigned Disp, bool NegOk,
259 bool DoDump = false);
260 bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
261 CPUser &U, unsigned &Growth);
262 bool fixupImmediateBr(ImmBranch &Br);
263 bool fixupConditionalBr(ImmBranch &Br);
264 bool fixupUnconditionalBr(ImmBranch &Br);
265 bool undoLRSpillRestore();
266 bool optimizeThumb2Instructions();
267 bool optimizeThumb2Branches();
268 bool reorderThumb2JumpTables();
269 bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
270 unsigned &DeadSize, bool &CanDeleteLEA,
271 bool &BaseRegKill);
272 bool optimizeThumb2JumpTables();
273 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
274 MachineBasicBlock *JTBB);
276 unsigned getUserOffset(CPUser&) const;
277 void dumpBBs();
278 void verify();
280 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
281 unsigned Disp, bool NegativeOK, bool IsSoImm = false);
282 bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
283 const CPUser &U) {
284 return isOffsetInRange(UserOffset, TrialOffset,
285 U.getMaxDisp(), U.NegOk, U.IsSoImm);
289 } // end anonymous namespace
291 char ARMConstantIslands::ID = 0;
293 /// verify - check BBOffsets, BBSizes, alignment of islands
294 void ARMConstantIslands::verify() {
295 #ifndef NDEBUG
296 BBInfoVector &BBInfo = BBUtils->getBBInfo();
297 assert(std::is_sorted(MF->begin(), MF->end(),
298 [&BBInfo](const MachineBasicBlock &LHS,
299 const MachineBasicBlock &RHS) {
300 return BBInfo[LHS.getNumber()].postOffset() <
301 BBInfo[RHS.getNumber()].postOffset();
302 }));
303 LLVM_DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
304 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
305 CPUser &U = CPUsers[i];
306 unsigned UserOffset = getUserOffset(U);
307 // Verify offset using the real max displacement without the safety
308 // adjustment.
309 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
310 /* DoDump = */ true)) {
311 LLVM_DEBUG(dbgs() << "OK\n");
312 continue;
314 LLVM_DEBUG(dbgs() << "Out of range.\n");
315 dumpBBs();
316 LLVM_DEBUG(MF->dump());
317 llvm_unreachable("Constant pool entry out of range!");
319 #endif
322 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
323 /// print block size and offset information - debugging
324 LLVM_DUMP_METHOD void ARMConstantIslands::dumpBBs() {
325 BBInfoVector &BBInfo = BBUtils->getBBInfo();
326 LLVM_DEBUG({
327 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
328 const BasicBlockInfo &BBI = BBInfo[J];
329 dbgs() << format("%08x %bb.%u\t", BBI.Offset, J)
330 << " kb=" << unsigned(BBI.KnownBits)
331 << " ua=" << unsigned(BBI.Unalign)
332 << " pa=" << unsigned(BBI.PostAlign)
333 << format(" size=%#x\n", BBInfo[J].Size);
337 #endif
339 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
340 MF = &mf;
341 MCP = mf.getConstantPool();
342 BBUtils = std::unique_ptr<ARMBasicBlockUtils>(new ARMBasicBlockUtils(mf));
344 LLVM_DEBUG(dbgs() << "***** ARMConstantIslands: "
345 << MCP->getConstants().size() << " CP entries, aligned to "
346 << MCP->getConstantPoolAlignment() << " bytes *****\n");
348 STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
349 TII = STI->getInstrInfo();
350 isPositionIndependentOrROPI =
351 STI->getTargetLowering()->isPositionIndependent() || STI->isROPI();
352 AFI = MF->getInfo<ARMFunctionInfo>();
354 isThumb = AFI->isThumbFunction();
355 isThumb1 = AFI->isThumb1OnlyFunction();
356 isThumb2 = AFI->isThumb2Function();
358 HasFarJump = false;
359 bool GenerateTBB = isThumb2 || (isThumb1 && SynthesizeThumb1TBB);
361 // Renumber all of the machine basic blocks in the function, guaranteeing that
362 // the numbers agree with the position of the block in the function.
363 MF->RenumberBlocks();
365 // Try to reorder and otherwise adjust the block layout to make good use
366 // of the TB[BH] instructions.
367 bool MadeChange = false;
368 if (GenerateTBB && AdjustJumpTableBlocks) {
369 scanFunctionJumpTables();
370 MadeChange |= reorderThumb2JumpTables();
371 // Data is out of date, so clear it. It'll be re-computed later.
372 T2JumpTables.clear();
373 // Blocks may have shifted around. Keep the numbering up to date.
374 MF->RenumberBlocks();
377 // Perform the initial placement of the constant pool entries. To start with,
378 // we put them all at the end of the function.
379 std::vector<MachineInstr*> CPEMIs;
380 if (!MCP->isEmpty())
381 doInitialConstPlacement(CPEMIs);
383 if (MF->getJumpTableInfo())
384 doInitialJumpTablePlacement(CPEMIs);
386 /// The next UID to take is the first unused one.
387 AFI->initPICLabelUId(CPEMIs.size());
389 // Do the initial scan of the function, building up information about the
390 // sizes of each block, the location of all the water, and finding all of the
391 // constant pool users.
392 initializeFunctionInfo(CPEMIs);
393 CPEMIs.clear();
394 LLVM_DEBUG(dumpBBs());
396 // Functions with jump tables need an alignment of 4 because they use the ADR
397 // instruction, which aligns the PC to 4 bytes before adding an offset.
398 if (!T2JumpTables.empty())
399 MF->ensureAlignment(2);
401 /// Remove dead constant pool entries.
402 MadeChange |= removeUnusedCPEntries();
404 // Iteratively place constant pool entries and fix up branches until there
405 // is no change.
406 unsigned NoCPIters = 0, NoBRIters = 0;
407 while (true) {
408 LLVM_DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
409 bool CPChange = false;
410 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
411 // For most inputs, it converges in no more than 5 iterations.
412 // If it hasn't converged after 10, the input may have a huge BB or many CPEs.
413 // In that case, we try different heuristics.
414 CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
415 if (CPChange && ++NoCPIters > CPMaxIteration)
416 report_fatal_error("Constant Island pass failed to converge!");
417 LLVM_DEBUG(dumpBBs());
419 // Clear NewWaterList now. If we split a block for branches, it should
420 // appear as "new water" for the next iteration of constant pool placement.
421 NewWaterList.clear();
423 LLVM_DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
424 bool BRChange = false;
425 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
426 BRChange |= fixupImmediateBr(ImmBranches[i]);
427 if (BRChange && ++NoBRIters > 30)
428 report_fatal_error("Branch Fix Up pass failed to converge!");
429 LLVM_DEBUG(dumpBBs());
431 if (!CPChange && !BRChange)
432 break;
433 MadeChange = true;
436 // Shrink 32-bit Thumb2 load and store instructions.
437 if (isThumb2 && !STI->prefers32BitThumb())
438 MadeChange |= optimizeThumb2Instructions();
440 // Shrink 32-bit branch instructions.
441 if (isThumb && STI->hasV8MBaselineOps())
442 MadeChange |= optimizeThumb2Branches();
444 // Optimize jump tables using TBB / TBH.
445 if (GenerateTBB && !STI->genExecuteOnly())
446 MadeChange |= optimizeThumb2JumpTables();
448 // After a while, this might be made debug-only, but it is not expensive.
449 verify();
451 // If LR has been forced spilled and no far jump (i.e. BL) has been issued,
452 // undo the spill / restore of LR if possible.
453 if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
454 MadeChange |= undoLRSpillRestore();
456 // Save the mapping between original and cloned constpool entries.
457 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
458 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
459 const CPEntry & CPE = CPEntries[i][j];
460 if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
461 AFI->recordCPEClone(i, CPE.CPI);
465 LLVM_DEBUG(dbgs() << '\n'; dumpBBs());
467 BBUtils->clear();
468 WaterList.clear();
469 CPUsers.clear();
470 CPEntries.clear();
471 JumpTableEntryIndices.clear();
472 JumpTableUserIndices.clear();
473 ImmBranches.clear();
474 PushPopMIs.clear();
475 T2JumpTables.clear();
477 return MadeChange;
480 /// Perform the initial placement of the regular constant pool entries.
481 /// To start with, we put them all at the end of the function.
482 void
483 ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
484 // Create the basic block to hold the CPE's.
485 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
486 MF->push_back(BB);
488 // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
489 unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());
491 // Mark the basic block as required by the const-pool.
492 BB->setAlignment(MaxAlign);
494 // The function needs to be as aligned as the basic blocks. The linker may
495 // move functions around based on their alignment.
496 MF->ensureAlignment(BB->getAlignment());
498 // Order the entries in BB by descending alignment. That ensures correct
499 // alignment of all entries as long as BB is sufficiently aligned. Keep
500 // track of the insertion point for each alignment. We are going to bucket
501 // sort the entries as they are created.
502 SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());
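// Worked example (illustrative only): with MaxAlign = 3 (8-byte alignment),
// every InsPoint[a] starts at BB->end(). After an align-4 entry (LogAlign = 2)
// is emitted, InsPoint[3] is redirected to it, so a later align-8 constant is
// placed before it, keeping the island sorted by descending alignment.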
504 // Add all of the constants from the constant pool to the end block, use an
505 // identity mapping of CPI's to CPE's.
506 const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();
508 const DataLayout &TD = MF->getDataLayout();
509 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
510 unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
511 unsigned Align = CPs[i].getAlignment();
512 assert(isPowerOf2_32(Align) && "Invalid alignment");
513 // Verify that all constant pool entries are a multiple of their alignment.
514 // If not, we would have to pad them out so that instructions stay aligned.
515 assert((Size % Align) == 0 && "CP Entry not a multiple of its alignment!");
517 // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
518 unsigned LogAlign = Log2_32(Align);
519 MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
520 MachineInstr *CPEMI =
521 BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
522 .addImm(i).addConstantPoolIndex(i).addImm(Size);
523 CPEMIs.push_back(CPEMI);
525 // Ensure that future entries with higher alignment get inserted before
526 // CPEMI. This is bucket sort with iterators.
527 for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
528 if (InsPoint[a] == InsAt)
529 InsPoint[a] = CPEMI;
531 // Add a new CPEntry, but no corresponding CPUser yet.
532 CPEntries.emplace_back(1, CPEntry(CPEMI, i));
533 ++NumCPEs;
534 LLVM_DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
535 << Size << ", align = " << Align << '\n');
537 LLVM_DEBUG(BB->dump());
540 /// Do initial placement of the jump tables. Because Thumb2's TBB and TBH
541 /// instructions can be made more efficient if the jump table immediately
542 /// follows the instruction, it's best to place them immediately next to their
543 /// jumps to begin with. In almost all cases they'll never be moved from that
544 /// position.
545 void ARMConstantIslands::doInitialJumpTablePlacement(
546 std::vector<MachineInstr *> &CPEMIs) {
547 unsigned i = CPEntries.size();
548 auto MJTI = MF->getJumpTableInfo();
549 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
551 MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
552 for (MachineBasicBlock &MBB : *MF) {
553 auto MI = MBB.getLastNonDebugInstr();
554 if (MI == MBB.end())
555 continue;
557 unsigned JTOpcode;
558 switch (MI->getOpcode()) {
559 default:
560 continue;
561 case ARM::BR_JTadd:
562 case ARM::BR_JTr:
563 case ARM::tBR_JTr:
564 case ARM::BR_JTm_i12:
565 case ARM::BR_JTm_rs:
566 JTOpcode = ARM::JUMPTABLE_ADDRS;
567 break;
568 case ARM::t2BR_JT:
569 JTOpcode = ARM::JUMPTABLE_INSTS;
570 break;
571 case ARM::tTBB_JT:
572 case ARM::t2TBB_JT:
573 JTOpcode = ARM::JUMPTABLE_TBB;
574 break;
575 case ARM::tTBH_JT:
576 case ARM::t2TBH_JT:
577 JTOpcode = ARM::JUMPTABLE_TBH;
578 break;
581 unsigned NumOps = MI->getDesc().getNumOperands();
582 MachineOperand JTOp =
583 MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
584 unsigned JTI = JTOp.getIndex();
585 unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
586 MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
587 MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
588 MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
589 DebugLoc(), TII->get(JTOpcode))
590 .addImm(i++)
591 .addJumpTableIndex(JTI)
592 .addImm(Size);
593 CPEMIs.push_back(CPEMI);
594 CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
595 JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
596 if (!LastCorrectlyNumberedBB)
597 LastCorrectlyNumberedBB = &MBB;
600 // If we did anything then we need to renumber the subsequent blocks.
601 if (LastCorrectlyNumberedBB)
602 MF->RenumberBlocks(LastCorrectlyNumberedBB);
605 /// BBHasFallthrough - Return true if the specified basic block can fallthrough
606 /// into the block immediately after it.
607 bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
608 // Get the next machine basic block in the function.
609 MachineFunction::iterator MBBI = MBB->getIterator();
610 // Can't fall off end of function.
611 if (std::next(MBBI) == MBB->getParent()->end())
612 return false;
614 MachineBasicBlock *NextBB = &*std::next(MBBI);
615 if (!MBB->isSuccessor(NextBB))
616 return false;
618 // Try to analyze the end of the block. A potential fallthrough may already
619 // have an unconditional branch for whatever reason.
620 MachineBasicBlock *TBB, *FBB;
621 SmallVector<MachineOperand, 4> Cond;
622 bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
623 return TooDifficult || FBB == nullptr;
626 /// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
627 /// look up the corresponding CPEntry.
628 ARMConstantIslands::CPEntry *
629 ARMConstantIslands::findConstPoolEntry(unsigned CPI,
630 const MachineInstr *CPEMI) {
631 std::vector<CPEntry> &CPEs = CPEntries[CPI];
632 // Number of entries per constpool index should be small, just do a
633 // linear search.
634 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
635 if (CPEs[i].CPEMI == CPEMI)
636 return &CPEs[i];
638 return nullptr;
641 /// getCPELogAlign - Returns the required alignment of the constant pool entry
642 /// represented by CPEMI. Alignment is measured in log2(bytes) units.
643 unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
644 switch (CPEMI->getOpcode()) {
645 case ARM::CONSTPOOL_ENTRY:
646 break;
647 case ARM::JUMPTABLE_TBB:
648 return isThumb1 ? 2 : 0;
649 case ARM::JUMPTABLE_TBH:
650 return isThumb1 ? 2 : 1;
651 case ARM::JUMPTABLE_INSTS:
652 return 1;
653 case ARM::JUMPTABLE_ADDRS:
654 return 2;
655 default:
656 llvm_unreachable("unknown constpool entry kind");
659 unsigned CPI = getCombinedIndex(CPEMI);
660 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
661 unsigned Align = MCP->getConstants()[CPI].getAlignment();
662 assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
663 return Log2_32(Align);
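// Example (illustrative): an 8-byte-aligned double constant has Align = 8, so
// this returns Log2_32(8) = 3 and the island block gets an 8-byte boundary.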
666 /// scanFunctionJumpTables - Do a scan of the function, building up
667 /// information about the sizes of each block and the locations of all
668 /// the jump tables.
669 void ARMConstantIslands::scanFunctionJumpTables() {
670 for (MachineBasicBlock &MBB : *MF) {
671 for (MachineInstr &I : MBB)
672 if (I.isBranch() &&
673 (I.getOpcode() == ARM::t2BR_JT || I.getOpcode() == ARM::tBR_JTr))
674 T2JumpTables.push_back(&I);
678 /// initializeFunctionInfo - Do the initial scan of the function, building up
679 /// information about the sizes of each block, the location of all the water,
680 /// and finding all of the constant pool users.
681 void ARMConstantIslands::
682 initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
684 BBUtils->computeAllBlockSizes();
685 BBInfoVector &BBInfo = BBUtils->getBBInfo();
686 // The known bits of the entry block offset are determined by the function
687 // alignment.
688 BBInfo.front().KnownBits = MF->getAlignment();
690 // Compute block offsets and known bits.
691 BBUtils->adjustBBOffsetsAfter(&MF->front());
693 // Now go back through the instructions and build up our data structures.
694 for (MachineBasicBlock &MBB : *MF) {
695 // If this block doesn't fall through into the next MBB, then this is
696 // 'water' where a constant pool island could be placed.
697 if (!BBHasFallthrough(&MBB))
698 WaterList.push_back(&MBB);
700 for (MachineInstr &I : MBB) {
701 if (I.isDebugInstr())
702 continue;
704 unsigned Opc = I.getOpcode();
705 if (I.isBranch()) {
706 bool isCond = false;
707 unsigned Bits = 0;
708 unsigned Scale = 1;
709 int UOpc = Opc;
710 switch (Opc) {
711 default:
712 continue; // Ignore other JT branches
713 case ARM::t2BR_JT:
714 case ARM::tBR_JTr:
715 T2JumpTables.push_back(&I);
716 continue; // Does not get an entry in ImmBranches
717 case ARM::Bcc:
718 isCond = true;
719 UOpc = ARM::B;
720 LLVM_FALLTHROUGH;
721 case ARM::B:
722 Bits = 24;
723 Scale = 4;
724 break;
725 case ARM::tBcc:
726 isCond = true;
727 UOpc = ARM::tB;
728 Bits = 8;
729 Scale = 2;
730 break;
731 case ARM::tB:
732 Bits = 11;
733 Scale = 2;
734 break;
735 case ARM::t2Bcc:
736 isCond = true;
737 UOpc = ARM::t2B;
738 Bits = 20;
739 Scale = 2;
740 break;
741 case ARM::t2B:
742 Bits = 24;
743 Scale = 2;
744 break;
747 // Record this immediate branch.
748 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
749 ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
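// Illustrative ranges implied by the formula above (not exhaustive):
//   ARM  Bcc/B : Bits=24, Scale=4 -> ((1<<23)-1)*4 = ~+/-32 MB
//   Thumb tBcc : Bits=8,  Scale=2 -> ((1<<7)-1)*2  = +/-254 bytes
//   Thumb2 t2B : Bits=24, Scale=2 -> ((1<<23)-1)*2 = ~+/-16 MB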
752 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
753 PushPopMIs.push_back(&I);
755 if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
756 Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
757 Opc == ARM::JUMPTABLE_TBH)
758 continue;
760 // Scan the instructions for constant pool operands.
761 for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
762 if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
763 // We found one. The addressing mode tells us the max displacement
764 // from the PC that this instruction permits.
766 // Basic size info comes from the TSFlags field.
767 unsigned Bits = 0;
768 unsigned Scale = 1;
769 bool NegOk = false;
770 bool IsSoImm = false;
772 switch (Opc) {
773 default:
774 llvm_unreachable("Unknown addressing mode for CP reference!");
776 // Taking the address of a CP entry.
777 case ARM::LEApcrel:
778 case ARM::LEApcrelJT:
779 // This takes a SoImm, which is an 8-bit rotated immediate. We'll
780 // pretend the maximum offset is 255 * 4. Since each instruction is
781 // 4 bytes wide, this is always correct. We'll check for other
782 // displacements that fit in a SoImm as well.
783 Bits = 8;
784 Scale = 4;
785 NegOk = true;
786 IsSoImm = true;
787 break;
788 case ARM::t2LEApcrel:
789 case ARM::t2LEApcrelJT:
790 Bits = 12;
791 NegOk = true;
792 break;
793 case ARM::tLEApcrel:
794 case ARM::tLEApcrelJT:
795 Bits = 8;
796 Scale = 4;
797 break;
799 case ARM::LDRBi12:
800 case ARM::LDRi12:
801 case ARM::LDRcp:
802 case ARM::t2LDRpci:
803 case ARM::t2LDRHpci:
804 case ARM::t2LDRBpci:
805 Bits = 12; // +-offset_12
806 NegOk = true;
807 break;
809 case ARM::tLDRpci:
810 Bits = 8;
811 Scale = 4; // +(offset_8*4)
812 break;
814 case ARM::VLDRD:
815 case ARM::VLDRS:
816 Bits = 8;
817 Scale = 4; // +-(offset_8*4)
818 NegOk = true;
819 break;
820 case ARM::VLDRH:
821 Bits = 8;
822 Scale = 2; // +-(offset_8*2)
823 NegOk = true;
824 break;
827 // Remember that this is a user of a CP entry.
828 unsigned CPI = I.getOperand(op).getIndex();
829 if (I.getOperand(op).isJTI()) {
830 JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
831 CPI = JumpTableEntryIndices[CPI];
834 MachineInstr *CPEMI = CPEMIs[CPI];
835 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
836 CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
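// Illustrative note: a tLDRpci user has Bits=8, Scale=4, so
// MaxOffs = ((1<<8)-1)*4 = 1020 bytes. The branch formula earlier uses
// Bits-1 because branch displacements are signed, while CP loads encode an
// unsigned offset field and model negative offsets via NegOk.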
838 // Increment corresponding CPEntry reference count.
839 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
840 assert(CPE && "Cannot find a corresponding CPEntry!");
841 CPE->RefCount++;
843 // Instructions can only use one CP entry, don't bother scanning the
844 // rest of the operands.
845 break;
851 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
852 /// ID.
853 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
854 const MachineBasicBlock *RHS) {
855 return LHS->getNumber() < RHS->getNumber();
858 /// updateForInsertedWaterBlock - When a block is newly inserted into the
859 /// machine function, it upsets all of the block numbers. Renumber the blocks
860 /// and update the arrays that parallel this numbering.
861 void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
862 // Renumber the MBB's to keep them consecutive.
863 NewBB->getParent()->RenumberBlocks(NewBB);
865 // Insert an entry into BBInfo to align it properly with the (newly
866 // renumbered) block numbers.
867 BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
869 // Next, update WaterList. Specifically, we need to add NewMBB as having
870 // available water after it.
871 water_iterator IP = llvm::lower_bound(WaterList, NewBB, CompareMBBNumbers);
872 WaterList.insert(IP, NewBB);
875 /// Split the basic block containing MI into two blocks, which are joined by
876 /// an unconditional branch. Update data structures and renumber blocks to
877 /// account for this change and returns the newly created block.
878 MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
879 MachineBasicBlock *OrigBB = MI->getParent();
881 // Collect liveness information at MI.
882 LivePhysRegs LRs(*MF->getSubtarget().getRegisterInfo());
883 LRs.addLiveOuts(*OrigBB);
884 auto LivenessEnd = ++MachineBasicBlock::iterator(MI).getReverse();
885 for (MachineInstr &LiveMI : make_range(OrigBB->rbegin(), LivenessEnd))
886 LRs.stepBackward(LiveMI);
888 // Create a new MBB for the code after the OrigBB.
889 MachineBasicBlock *NewBB =
890 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
891 MachineFunction::iterator MBBI = ++OrigBB->getIterator();
892 MF->insert(MBBI, NewBB);
894 // Splice the instructions starting with MI over to NewBB.
895 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
897 // Add an unconditional branch from OrigBB to NewBB.
898 // Note the new unconditional branch is not being recorded.
899 // There doesn't seem to be meaningful DebugInfo available; this doesn't
900 // correspond to anything in the source.
901 unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
902 if (!isThumb)
903 BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
904 else
905 BuildMI(OrigBB, DebugLoc(), TII->get(Opc))
906 .addMBB(NewBB)
907 .add(predOps(ARMCC::AL));
908 ++NumSplit;
910 // Update the CFG. All succs of OrigBB are now succs of NewBB.
911 NewBB->transferSuccessors(OrigBB);
913 // OrigBB branches to NewBB.
914 OrigBB->addSuccessor(NewBB);
916 // Update live-in information in the new block.
917 MachineRegisterInfo &MRI = MF->getRegInfo();
918 for (MCPhysReg L : LRs)
919 if (!MRI.isReserved(L))
920 NewBB->addLiveIn(L);
922 // Update internal data structures to account for the newly inserted MBB.
923 // This is almost the same as updateForInsertedWaterBlock, except that
924 // the Water goes after OrigBB, not NewBB.
925 MF->RenumberBlocks(NewBB);
927 // Insert an entry into BBInfo to align it properly with the (newly
928 // renumbered) block numbers.
929 BBUtils->insert(NewBB->getNumber(), BasicBlockInfo());
931 // Next, update WaterList. Specifically, we need to add OrigMBB as having
932 // available water after it (but not if it's already there, which happens
933 // when splitting before a conditional branch that is followed by an
934 // unconditional branch - in that case we want to insert NewBB).
935 water_iterator IP = llvm::lower_bound(WaterList, OrigBB, CompareMBBNumbers);
936 MachineBasicBlock* WaterBB = *IP;
937 if (WaterBB == OrigBB)
938 WaterList.insert(std::next(IP), NewBB);
939 else
940 WaterList.insert(IP, OrigBB);
941 NewWaterList.insert(OrigBB);
943 // Figure out how large the OrigBB is. As the first half of the original
944 // block, it cannot contain a tablejump. The size includes
945 // the new jump we added. (It should be possible to do this without
946 // recounting everything, but it's very confusing, and this is rarely
947 // executed.)
948 BBUtils->computeBlockSize(OrigBB);
950 // Figure out how large the NewMBB is. As the second half of the original
951 // block, it may contain a tablejump.
952 BBUtils->computeBlockSize(NewBB);
954 // All BBOffsets following these blocks must be modified.
955 BBUtils->adjustBBOffsetsAfter(OrigBB);
957 return NewBB;
960 /// getUserOffset - Compute the offset of U.MI as seen by the hardware
961 /// displacement computation. Update U.KnownAlignment to match its current
962 /// basic block location.
963 unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
964 unsigned UserOffset = BBUtils->getOffsetOf(U.MI);
966 SmallVectorImpl<BasicBlockInfo> &BBInfo = BBUtils->getBBInfo();
967 const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
968 unsigned KnownBits = BBI.internalKnownBits();
970 // The value read from PC is offset from the actual instruction address.
971 UserOffset += (isThumb ? 4 : 8);
973 // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
974 // Make sure U.getMaxDisp() returns a constrained range.
975 U.KnownAlignment = (KnownBits >= 2);
977 // On Thumb, offsets==2 mod 4 are rounded down by the hardware for
978 // purposes of the displacement computation; compensate for that here.
979 // For unknown alignments, getMaxDisp() constrains the range instead.
980 if (isThumb && U.KnownAlignment)
981 UserOffset &= ~3u;
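// Worked example (hypothetical addresses): a Thumb user at offset 0x102
// reads PC as 0x102 + 4 = 0x106; since the hardware effectively rounds an
// offset of 2 mod 4 down, the masking above yields 0x104 as the base used
// for the displacement computation.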
983 return UserOffset;
986 /// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
987 /// reference) is within MaxDisp of TrialOffset (a proposed location of a
988 /// constant pool entry).
989 /// UserOffset is computed by getUserOffset above to include PC adjustments. If
990 /// the mod 4 alignment of UserOffset is not known, the uncertainty must be
991 /// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
992 bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
993 unsigned TrialOffset, unsigned MaxDisp,
994 bool NegativeOK, bool IsSoImm) {
995 if (UserOffset <= TrialOffset) {
996 // User before the Trial.
997 if (TrialOffset - UserOffset <= MaxDisp)
998 return true;
999 // FIXME: Make use of the full range of soimm values.
1000 } else if (NegativeOK) {
1001 if (UserOffset - TrialOffset <= MaxDisp)
1002 return true;
1003 // FIXME: Make use of the full range of soimm values.
1005 return false;
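// Worked example (hypothetical offsets): UserOffset = 0x1000,
// TrialOffset = 0x13FC, MaxDisp = 1020 (0x3FC): the trial is after the user
// and exactly 0x3FC bytes away, so it is accepted; at 0x1400 it would be
// 0x400 = 1024 bytes away and rejected.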
1008 /// isWaterInRange - Returns true if a CPE placed after the specified
1009 /// Water (a basic block) will be in range for the specific MI.
1011 /// Compute how much the function will grow by inserting a CPE after Water.
1012 bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
1013 MachineBasicBlock* Water, CPUser &U,
1014 unsigned &Growth) {
1015 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1016 unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
1017 unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
1018 unsigned NextBlockOffset, NextBlockAlignment;
1019 MachineFunction::const_iterator NextBlock = Water->getIterator();
1020 if (++NextBlock == MF->end()) {
1021 NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
1022 NextBlockAlignment = 0;
1023 } else {
1024 NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
1025 NextBlockAlignment = NextBlock->getAlignment();
1027 unsigned Size = U.CPEMI->getOperand(2).getImm();
1028 unsigned CPEEnd = CPEOffset + Size;
1030 // The CPE may be able to hide in the alignment padding before the next
1031 // block. It may also cause more padding to be required if it is more aligned
1032 // than the next block.
1033 if (CPEEnd > NextBlockOffset) {
1034 Growth = CPEEnd - NextBlockOffset;
1035 // Compute the padding that would go at the end of the CPE to align the next
1036 // block.
1037 Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);
1039 // If the CPE is to be inserted before the instruction, that will raise
1040 // the offset of the instruction. Also account for unknown alignment padding
1041 // in blocks between CPE and the user.
1042 if (CPEOffset < UserOffset)
1043 UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
1044 } else
1045 // CPE fits in existing padding.
1046 Growth = 0;
1048 return isOffsetInRange(UserOffset, CPEOffset, U);
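// Growth example (hypothetical layout): a CPE placed at CPEOffset = 0x20 with
// Size = 8 ends at 0x28; if the next block starts at 0x24, the function grows
// by 4 bytes plus whatever padding restores the next block's alignment.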
1051 /// isCPEntryInRange - Returns true if the distance between specific MI and
1052 /// specific ConstPool entry instruction can fit in MI's displacement field.
1053 bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
1054 MachineInstr *CPEMI, unsigned MaxDisp,
1055 bool NegOk, bool DoDump) {
1056 unsigned CPEOffset = BBUtils->getOffsetOf(CPEMI);
1058 if (DoDump) {
1059 LLVM_DEBUG({
1060 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1061 unsigned Block = MI->getParent()->getNumber();
1062 const BasicBlockInfo &BBI = BBInfo[Block];
1063 dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
1064 << " max delta=" << MaxDisp
1065 << format(" insn address=%#x", UserOffset) << " in "
1066 << printMBBReference(*MI->getParent()) << ": "
1067 << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
1068 << format("CPE address=%#x offset=%+d: ", CPEOffset,
1069 int(CPEOffset - UserOffset));
1073 return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
1076 #ifndef NDEBUG
1077 /// BBIsJumpedOver - Return true if the specified basic block's only predecessor
1078 /// unconditionally branches to its only successor.
1079 static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
1080 if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
1081 return false;
1083 MachineBasicBlock *Succ = *MBB->succ_begin();
1084 MachineBasicBlock *Pred = *MBB->pred_begin();
1085 MachineInstr *PredMI = &Pred->back();
1086 if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
1087 || PredMI->getOpcode() == ARM::t2B)
1088 return PredMI->getOperand(0).getMBB() == Succ;
1089 return false;
1091 #endif // NDEBUG
1093 /// decrementCPEReferenceCount - find the constant pool entry with index CPI
1094 /// and instruction CPEMI, and decrement its refcount. If the refcount
1095 /// becomes 0 remove the entry and instruction. Returns true if we removed
1096 /// the entry, false if we didn't.
1097 bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
1098 MachineInstr *CPEMI) {
1099 // Find the old entry. Eliminate it if it is no longer used.
1100 CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
1101 assert(CPE && "Unexpected!");
1102 if (--CPE->RefCount == 0) {
1103 removeDeadCPEMI(CPEMI);
1104 CPE->CPEMI = nullptr;
1105 --NumCPEs;
1106 return true;
1108 return false;
1111 unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
1112 if (CPEMI->getOperand(1).isCPI())
1113 return CPEMI->getOperand(1).getIndex();
1115 return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
1118 /// findInRangeCPEntry - see if the currently referenced CPE is in range;
1119 /// if not, see if an in-range clone of the CPE is in range, and if so,
1120 /// change the data structures so the user references the clone. Returns:
1121 /// 0 = no existing entry found
1122 /// 1 = entry found, and there were no code insertions or deletions
1123 /// 2 = entry found, and there were code insertions or deletions
1124 int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset) {
1125 MachineInstr *UserMI = U.MI;
1126 MachineInstr *CPEMI = U.CPEMI;
1128 // Check to see if the CPE is already in-range.
1129 if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
1130 true)) {
1131 LLVM_DEBUG(dbgs() << "In range\n");
1132 return 1;
1135 // No. Look for previously created clones of the CPE that are in range.
1136 unsigned CPI = getCombinedIndex(CPEMI);
1137 std::vector<CPEntry> &CPEs = CPEntries[CPI];
1138 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
1139 // We already tried this one
1140 if (CPEs[i].CPEMI == CPEMI)
1141 continue;
1142 // Removing CPEs can leave empty entries, skip
1143 if (CPEs[i].CPEMI == nullptr)
1144 continue;
1145 if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
1146 U.NegOk)) {
1147 LLVM_DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
1148 << CPEs[i].CPI << "\n");
1149 // Point the CPUser node to the replacement
1150 U.CPEMI = CPEs[i].CPEMI;
1151 // Change the CPI in the instruction operand to refer to the clone.
1152 for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
1153 if (UserMI->getOperand(j).isCPI()) {
1154 UserMI->getOperand(j).setIndex(CPEs[i].CPI);
1155 break;
1157 // Adjust the refcount of the clone...
1158 CPEs[i].RefCount++;
1159 // ...and the original. If we didn't remove the old entry, none of the
1160 // addresses changed, so we don't need another pass.
1161 return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
1164 return 0;
1167 /// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
1168 /// the specific unconditional branch instruction.
1169 static inline unsigned getUnconditionalBrDisp(int Opc) {
1170 switch (Opc) {
1171 case ARM::tB:
1172 return ((1<<10)-1)*2;
1173 case ARM::t2B:
1174 return ((1<<23)-1)*2;
1175 default:
1176 break;
1179 return ((1<<23)-1)*4;
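// For reference, the values above are: tB -> 2046 bytes, t2B -> 16777214
// bytes (~16 MB), and the ARM B fallthrough case -> 33554428 bytes (~32 MB).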
1182 /// findAvailableWater - Look for an existing entry in the WaterList in which
1183 /// we can place the CPE referenced from U so it's within range of U's MI.
1184 /// Returns true if found, false if not. If it returns true, WaterIter
1185 /// is set to the WaterList entry. For Thumb, prefer water that will not
1186 /// introduce padding to water that will. To ensure that this pass
1187 /// terminates, the CPE location for a particular CPUser is only allowed to
1188 /// move to a lower address, so search backward from the end of the list and
1189 /// prefer the first water that is in range.
1190 bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
1191 water_iterator &WaterIter,
1192 bool CloserWater) {
1193 if (WaterList.empty())
1194 return false;
1196 unsigned BestGrowth = ~0u;
1197 // The nearest water without splitting the UserBB is right after it.
1198 // If the distance is still large (we have a big BB), then we need to split it
1199 // if we don't converge after certain iterations. This helps the following
1200 // situation to converge:
1201 // BB0:
1202 // Big BB
1203 // BB1:
1204 // Constant Pool
1205 // When a CP access is out of range, BB0 may be used as water. However,
1206 // inserting islands between BB0 and BB1 makes other accesses out of range.
1207 MachineBasicBlock *UserBB = U.MI->getParent();
1208 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1209 unsigned MinNoSplitDisp =
1210 BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
1211 if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
1212 return false;
1213 for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
1214 --IP) {
1215 MachineBasicBlock* WaterBB = *IP;
1216 // Check if water is in range and is either at a lower address than the
1217 // current "high water mark" or a new water block that was created since
1218 // the previous iteration by inserting an unconditional branch. In the
1219 // latter case, we want to allow resetting the high water mark back to
1220 // this new water since we haven't seen it before. Inserting branches
1221 // should be relatively uncommon and when it does happen, we want to be
1222 // sure to take advantage of it for all the CPEs near that block, so that
1223 // we don't insert more branches than necessary.
1224 // When CloserWater is true, we try to find the lowest address after (or
1225 // equal to) the user MI's BB, regardless of padding growth.
1226 unsigned Growth;
1227 if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
1228 (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
1229 NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
1230 Growth < BestGrowth) {
1231 // This is the least amount of required padding seen so far.
1232 BestGrowth = Growth;
1233 WaterIter = IP;
1234 LLVM_DEBUG(dbgs() << "Found water after " << printMBBReference(*WaterBB)
1235 << " Growth=" << Growth << '\n');
1237 if (CloserWater && WaterBB == U.MI->getParent())
1238 return true;
1239 // Keep looking unless it is perfect and we're not looking for the lowest
1240 // possible address.
1241 if (!CloserWater && BestGrowth == 0)
1242 return true;
1244 if (IP == B)
1245 break;
1247 return BestGrowth != ~0u;
1250 /// createNewWater - No existing WaterList entry will work for
1251 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1252 /// block is used if in range, and the conditional branch munged so control
1253 /// flow is correct. Otherwise the block is split to create a hole with an
1254 /// unconditional branch around it. In either case NewMBB is set to a
1255 /// block following which the new island can be inserted (the WaterList
1256 /// is not adjusted).
1257 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1258 unsigned UserOffset,
1259 MachineBasicBlock *&NewMBB) {
1260 CPUser &U = CPUsers[CPUserIndex];
1261 MachineInstr *UserMI = U.MI;
1262 MachineInstr *CPEMI = U.CPEMI;
1263 unsigned CPELogAlign = getCPELogAlign(CPEMI);
1264 MachineBasicBlock *UserMBB = UserMI->getParent();
1265 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1266 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1268 // If the block does not end in an unconditional branch already, and if the
1269 // end of the block is within range, make new water there. (The addition
1270 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1271 // Thumb2, 2 on Thumb1.)
1272 if (BBHasFallthrough(UserMBB)) {
1273 // Size of branch to insert.
1274 unsigned Delta = isThumb1 ? 2 : 4;
1275 // Compute the offset where the CPE will begin.
1276 unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
1278 if (isOffsetInRange(UserOffset, CPEOffset, U)) {
1279 LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
1280 << format(", expected CPE offset %#x\n", CPEOffset));
1281 NewMBB = &*++UserMBB->getIterator();
1282 // Add an unconditional branch from UserMBB to fallthrough block. Record
1283 // it for branch lengthening; this new branch will not get out of range,
1284 // but if the preceding conditional branch is out of range, the targets
1285 // will be exchanged, and the altered branch may be out of range, so the
1286 // machinery has to know about it.
1287 int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
1288 if (!isThumb)
1289 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
1290 else
1291 BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr))
1292 .addMBB(NewMBB)
1293 .add(predOps(ARMCC::AL));
1294 unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
1295 ImmBranches.push_back(ImmBranch(&UserMBB->back(),
1296 MaxDisp, false, UncondBr));
1297 BBUtils->computeBlockSize(UserMBB);
1298 BBUtils->adjustBBOffsetsAfter(UserMBB);
1299 return;
1303 // What a big block. Find a place within the block to split it. This is a
1304 // little tricky on Thumb1 since instructions are 2 bytes and constant pool
1305 // entries are 4 bytes: if instruction I references island CPE, and
1306 // instruction I+1 references CPE', it will not work well to put CPE as far
1307 // forward as possible, since then CPE' cannot immediately follow it (that
1308 // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
1309 // need to create a new island. So, we make a first guess, then walk through
1310 // the instructions between the one currently being looked at and the
1311 // possible insertion point, and make sure any other instructions that
1312 // reference CPEs will be able to use the same island area; if not, we back
1313 // up the insertion point.
1315 // Try to split the block so it's fully aligned. Compute the latest split
1316 // point where we can add a 4-byte branch instruction, and then align to
1317 // LogAlign which is the largest possible alignment in the function.
1318 unsigned LogAlign = MF->getAlignment();
1319 assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
1320 unsigned KnownBits = UserBBI.internalKnownBits();
1321 unsigned UPad = UnknownPadding(LogAlign, KnownBits);
1322 unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
1323 LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
1324 BaseInsertOffset));
1326 // The 4 in the following is for the unconditional branch we'll be inserting
1327 // (allows for long branch on Thumb1). Alignment of the island is handled
1328 // inside isOffsetInRange.
1329 BaseInsertOffset -= 4;
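// Illustrative example (hypothetical numbers): with UserOffset = 0x100,
// U.getMaxDisp() = 1018 and UPad = 2, the split point starts at
// 0x100 + 1018 - 2 = 0x4F8 and then moves back 4 bytes for the new
// unconditional branch, giving 0x4F4.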
1331 LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
1332 << " la=" << LogAlign << " kb=" << KnownBits
1333 << " up=" << UPad << '\n');
1335 // This could point off the end of the block if we've already got constant
1336 // pool entries following this block; only the last one is in the water list.
1337 // Back past any possible branches (allow for a conditional and a maximally
1338 // long unconditional).
1339 if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
1340 // Ensure BaseInsertOffset is larger than the offset of the instruction
1341 // following UserMI so that the loop which searches for the split point
1342 // iterates at least once.
1343 BaseInsertOffset =
1344 std::max(UserBBI.postOffset() - UPad - 8,
1345 UserOffset + TII->getInstSizeInBytes(*UserMI) + 1);
1346 // If the CP reference (i.e., UserOffset) is within the first four
1347 // instructions after an IT, this recalculated BaseInsertOffset could be in
1348 // the middle of an IT block. If it is, change the BaseInsertOffset to just
1349 // after the IT block. This still keeps the CP entry in range because of the
1350 // following reasons.
1351 // 1. The initial BaseInsertOffset calculated is (UserOffset +
1352 // U.getMaxDisp() - UPad).
1353 // 2. An IT block is at most 4 instructions plus the "it" itself (18
1354 // bytes).
1355 // 3. All the relevant instructions support much larger Maximum
1356 // displacement.
1357 MachineBasicBlock::iterator I = UserMI;
1358 ++I;
1359 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI),
1360 PredReg = 0;
1361 I->getOpcode() != ARM::t2IT &&
1362 getITInstrPredicate(*I, PredReg) != ARMCC::AL;
1363 Offset += TII->getInstSizeInBytes(*I), I = std::next(I)) {
1364 BaseInsertOffset =
1365 std::max(BaseInsertOffset, Offset + TII->getInstSizeInBytes(*I) + 1);
1366 assert(I != UserMBB->end() && "Fell off end of block");
1368 LLVM_DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
1370 unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
1371 CPEMI->getOperand(2).getImm();
1372 MachineBasicBlock::iterator MI = UserMI;
1373 ++MI;
1374 unsigned CPUIndex = CPUserIndex+1;
1375 unsigned NumCPUsers = CPUsers.size();
1376 MachineInstr *LastIT = nullptr;
1377 for (unsigned Offset = UserOffset + TII->getInstSizeInBytes(*UserMI);
1378 Offset < BaseInsertOffset;
1379 Offset += TII->getInstSizeInBytes(*MI), MI = std::next(MI)) {
1380 assert(MI != UserMBB->end() && "Fell off end of block");
1381 if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
1382 CPUser &U = CPUsers[CPUIndex];
1383 if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
1384 // Shift the insertion point by one unit of alignment so it is within reach.
1385 BaseInsertOffset -= 1u << LogAlign;
1386 EndInsertOffset -= 1u << LogAlign;
1388 // This is overly conservative, as we don't account for CPEMIs being
1389 // reused within the block, but it doesn't matter much. Also assume CPEs
1390 // are added in order with alignment padding. We may eventually be able
1391 // to pack the aligned CPEs better.
1392 EndInsertOffset += U.CPEMI->getOperand(2).getImm();
1393 CPUIndex++;
1396 // Remember the last IT instruction.
1397 if (MI->getOpcode() == ARM::t2IT)
1398 LastIT = &*MI;
1401 --MI;
1403 // Avoid splitting an IT block.
1404 if (LastIT) {
1405 unsigned PredReg = 0;
1406 ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
1407 if (CC != ARMCC::AL)
1408 MI = LastIT;
1411 // Avoid splitting a MOVW+MOVT pair with a relocation on Windows.
1412 // On Windows, this instruction pair is covered by one single
1413 // IMAGE_REL_ARM_MOV32T relocation which covers both instructions. If a
1414 // constant island is injected in between them, the relocation will clobber
1415 // the instruction and fail to update the MOVT instruction.
1416 // (These instructions are bundled up until right before the ConstantIslands
1417 // pass.)
1418 if (STI->isTargetWindows() && isThumb && MI->getOpcode() == ARM::t2MOVTi16 &&
1419 (MI->getOperand(2).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1420 ARMII::MO_HI16) {
1421 --MI;
1422 assert(MI->getOpcode() == ARM::t2MOVi16 &&
1423 (MI->getOperand(1).getTargetFlags() & ARMII::MO_OPTION_MASK) ==
1424 ARMII::MO_LO16);
1427 // We really must not split an IT block.
1428 #ifndef NDEBUG
1429 unsigned PredReg;
1430 assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL);
1431 #endif
1432 NewMBB = splitBlockBeforeInstr(&*MI);
1435 /// handleConstantPoolUser - Analyze the specified user, checking to see if it
1436 /// is out-of-range. If so, pick up the constant pool value and move it some
1437 /// place in-range. Return true if we changed any addresses (thus must run
1438 /// another pass of branch lengthening), false otherwise.
1439 bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
1440 bool CloserWater) {
1441 CPUser &U = CPUsers[CPUserIndex];
1442 MachineInstr *UserMI = U.MI;
1443 MachineInstr *CPEMI = U.CPEMI;
1444 unsigned CPI = getCombinedIndex(CPEMI);
1445 unsigned Size = CPEMI->getOperand(2).getImm();
1446 // Compute this only once, it's expensive.
1447 unsigned UserOffset = getUserOffset(U);
1449 // See if the current entry is within range, or there is a clone of it
1450 // in range.
1451 int result = findInRangeCPEntry(U, UserOffset);
1452 if (result==1) return false;
1453 else if (result==2) return true;
1455 // No existing clone of this CPE is within range.
1456 // We will be generating a new clone. Get a UID for it.
1457 unsigned ID = AFI->createPICLabelUId();
1459 // Look for water where we can place this CPE.
1460 MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
1461 MachineBasicBlock *NewMBB;
1462 water_iterator IP;
1463 if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
1464 LLVM_DEBUG(dbgs() << "Found water in range\n");
1465 MachineBasicBlock *WaterBB = *IP;
1467 // If the original WaterList entry was "new water" on this iteration,
1468 // propagate that to the new island. This is just keeping NewWaterList
1469 // updated to match the WaterList, which will be updated below.
1470 if (NewWaterList.erase(WaterBB))
1471 NewWaterList.insert(NewIsland);
1473 // The new CPE goes before the following block (NewMBB).
1474 NewMBB = &*++WaterBB->getIterator();
1475 } else {
1476 // No water found.
1477 LLVM_DEBUG(dbgs() << "No water found\n");
1478 createNewWater(CPUserIndex, UserOffset, NewMBB);
1480 // splitBlockBeforeInstr adds to WaterList, which is important when it is
1481 // called while handling branches so that the water will be seen on the
1482 // next iteration for constant pools, but in this context, we don't want
1483 // it. Check for this so it will be removed from the WaterList.
1484 // Also remove any entry from NewWaterList.
1485 MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
1486 IP = find(WaterList, WaterBB);
1487 if (IP != WaterList.end())
1488 NewWaterList.erase(WaterBB);
1490 // We are adding new water. Update NewWaterList.
1491 NewWaterList.insert(NewIsland);
1493 // Always align the new block because CP entries can be smaller than 4
1494 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
1495 // be an already aligned constant pool block.
1496 const unsigned Align = isThumb ? 1 : 2;
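// Note: these are log2 values (1 -> 2-byte alignment, 2 -> 4-byte alignment),
// consistent with getCPELogAlign() used below.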
1497 if (NewMBB->getAlignment() < Align)
1498 NewMBB->setAlignment(Align);
1500 // Remove the original WaterList entry; we want subsequent insertions in
1501 // this vicinity to go after the one we're about to insert. This
1502 // considerably reduces the number of times we have to move the same CPE
1503 // more than once and is also important to ensure the algorithm terminates.
1504 if (IP != WaterList.end())
1505 WaterList.erase(IP);
1507 // Okay, we know we can put an island before NewMBB now, do it!
1508 MF->insert(NewMBB->getIterator(), NewIsland);
1510 // Update internal data structures to account for the newly inserted MBB.
1511 updateForInsertedWaterBlock(NewIsland);
1513 // Now that we have an island to add the CPE to, clone the original CPE and
1514 // add it to the island.
1515 U.HighWaterMark = NewIsland;
1516 U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1517 .addImm(ID)
1518 .add(CPEMI->getOperand(1))
1519 .addImm(Size);
1520 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
1521 ++NumCPEs;
1523 // Decrement the old entry, and remove it if refcount becomes 0.
1524 decrementCPEReferenceCount(CPI, CPEMI);
1526 // Mark the basic block as aligned as required by the const-pool entry.
1527 NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
1529 // Increase the size of the island block to account for the new entry.
1530 BBUtils->adjustBBSize(NewIsland, Size);
1531 BBUtils->adjustBBOffsetsAfter(&*--NewIsland->getIterator());
1533 // Finally, change the CPI in the instruction operand to be ID.
1534 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
1535 if (UserMI->getOperand(i).isCPI()) {
1536 UserMI->getOperand(i).setIndex(ID);
1537 break;
1540 LLVM_DEBUG(
1541 dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
1542 << format(" offset=%#x\n",
1543 BBUtils->getBBInfo()[NewIsland->getNumber()].Offset));
1545 return true;
1548 /// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
1549 /// sizes and offsets of impacted basic blocks.
1550 void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
1551 MachineBasicBlock *CPEBB = CPEMI->getParent();
1552 unsigned Size = CPEMI->getOperand(2).getImm();
1553 CPEMI->eraseFromParent();
1554 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1555 BBUtils->adjustBBSize(CPEBB, -Size);
1556 // All succeeding offsets have the current size value added in; fix this.
1557 if (CPEBB->empty()) {
1558 BBInfo[CPEBB->getNumber()].Size = 0;
1560 // This block no longer needs to be aligned.
1561 CPEBB->setAlignment(0);
1562 } else
1563 // Entries are sorted by descending alignment, so realign from the front.
1564 CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));
1566 BBUtils->adjustBBOffsetsAfter(CPEBB);
1567 // An island has only one predecessor BB and one successor BB. Check if
1568 // this BB's predecessor jumps directly to this BB's successor. This
1569 // shouldn't happen currently.
1570 assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
1571 // FIXME: remove the empty blocks after all the work is done?
1574 /// removeUnusedCPEntries - Remove constant pool entries whose refcounts
1575 /// are zero.
1576 bool ARMConstantIslands::removeUnusedCPEntries() {
1577 bool MadeChange = false;
1578 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
1579 std::vector<CPEntry> &CPEs = CPEntries[i];
1580 for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
1581 if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
1582 removeDeadCPEMI(CPEs[j].CPEMI);
1583 CPEs[j].CPEMI = nullptr;
1584 MadeChange = true;
1588 return MadeChange;
1592 /// fixupImmediateBr - Fix up an immediate branch whose destination is too far
1593 /// away to fit in its displacement field.
1594 bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
1595 MachineInstr *MI = Br.MI;
1596 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1598 // Check to see if the DestBB is already in-range.
1599 if (BBUtils->isBBInRange(MI, DestBB, Br.MaxDisp))
1600 return false;
1602 if (!Br.isCond)
1603 return fixupUnconditionalBr(Br);
1604 return fixupConditionalBr(Br);
1607 /// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
1608 /// too far away to fit in its displacement field. If the LR register has been
1609 /// spilled in the epilogue, then we can use BL to implement a far jump.
1610 /// Otherwise, the function size was underestimated and a fatal error is reported.
1611 bool
1612 ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
1613 MachineInstr *MI = Br.MI;
1614 MachineBasicBlock *MBB = MI->getParent();
1615 if (!isThumb1)
1616 llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");
1618 if (!AFI->isLRSpilled())
1619 report_fatal_error("underestimated function size");
1621 // Use BL to implement a far jump.
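// (Thumb BL encodes a signed offset of about 22 bits in halfwords, i.e. a
// reach of roughly +/-4 MB; hence MaxDisp = (1 << 21) * 2 bytes.)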
1622 Br.MaxDisp = (1 << 21) * 2;
1623 MI->setDesc(TII->get(ARM::tBfar));
1624 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1625 BBInfo[MBB->getNumber()].Size += 2;
1626 BBUtils->adjustBBOffsetsAfter(MBB);
1627 HasFarJump = true;
1628 ++NumUBrFixed;
1630 LLVM_DEBUG(dbgs() << " Changed B to long jump " << *MI);
1632 return true;
1635 /// fixupConditionalBr - Fix up a conditional branch whose destination is too
1636 /// far away to fit in its displacement field. It is converted to an inverse
1637 /// conditional branch + an unconditional branch to the destination.
1638 bool
1639 ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
1640 MachineInstr *MI = Br.MI;
1641 MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();
1643 // Add an unconditional branch to the destination and invert the branch
1644 // condition to jump over it:
1645 // blt L1
1646 // =>
1647 // bge L2
1648 // b L1
1649 // L2:
1650 ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
1651 CC = ARMCC::getOppositeCondition(CC);
1652 Register CCReg = MI->getOperand(2).getReg();
1654 // If the branch is at the end of its MBB and that has a fall-through block,
1655 // direct the updated conditional branch to the fall-through block. Otherwise,
1656 // split the MBB before the next instruction.
1657 MachineBasicBlock *MBB = MI->getParent();
1658 MachineInstr *BMI = &MBB->back();
1659 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
1661 ++NumCBrFixed;
1662 if (BMI != MI) {
1663 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
1664 BMI->getOpcode() == Br.UncondBr) {
1665 // Last MI in the BB is an unconditional branch. Can we simply invert the
1666 // condition and swap destinations:
1667 // beq L1
1668 // b L2
1669 // =>
1670 // bne L2
1671 // b L1
1672 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
1673 if (BBUtils->isBBInRange(MI, NewDest, Br.MaxDisp)) {
1674 LLVM_DEBUG(
1675 dbgs() << " Invert Bcc condition and swap its destination with "
1676 << *BMI);
1677 BMI->getOperand(0).setMBB(DestBB);
1678 MI->getOperand(0).setMBB(NewDest);
1679 MI->getOperand(1).setImm(CC);
1680 return true;
1685 if (NeedSplit) {
1686 splitBlockBeforeInstr(MI);
1687 // No need for the branch to the next block. We're adding an unconditional
1688 // branch to the destination.
1689 int delta = TII->getInstSizeInBytes(MBB->back());
1690 BBUtils->adjustBBSize(MBB, -delta);
1691 MBB->back().eraseFromParent();
1693 // The conditional successor will be swapped between the BBs after this, so
1694 // update CFG.
1695 MBB->addSuccessor(DestBB);
1696 std::next(MBB->getIterator())->removeSuccessor(DestBB);
1698 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
1700 MachineBasicBlock *NextBB = &*++MBB->getIterator();
1702 LLVM_DEBUG(dbgs() << " Insert B to " << printMBBReference(*DestBB)
1703 << " also invert condition and change dest. to "
1704 << printMBBReference(*NextBB) << "\n");
1706 // Insert a new conditional branch and a new unconditional branch.
1707 // Also update the ImmBranch as well as adding a new entry for the new branch.
1708 BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
1709 .addMBB(NextBB).addImm(CC).addReg(CCReg);
1710 Br.MI = &MBB->back();
1711 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1712 if (isThumb)
1713 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr))
1714 .addMBB(DestBB)
1715 .add(predOps(ARMCC::AL));
1716 else
1717 BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
1718 BBUtils->adjustBBSize(MBB, TII->getInstSizeInBytes(MBB->back()));
1719 unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
1720 ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
1722 // Remove the old conditional branch. It may or may not still be in MBB.
1723 BBUtils->adjustBBSize(MI->getParent(), -TII->getInstSizeInBytes(*MI));
1724 MI->eraseFromParent();
1725 BBUtils->adjustBBOffsetsAfter(MBB);
1726 return true;
1729 /// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
1730 /// LR / restore LR to pc. FIXME: This is done here because it's only possible
1731 /// to do this if tBfar is not used.
1732 bool ARMConstantIslands::undoLRSpillRestore() {
1733 bool MadeChange = false;
1734 for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
1735 MachineInstr *MI = PushPopMIs[i];
1736 // First two operands are predicates.
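// In effect: a 'pop {pc}' that restores nothing but pc is rewritten to a
// 'bx lr', and a 'push {lr}' that spills nothing but lr is deleted.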
1737 if (MI->getOpcode() == ARM::tPOP_RET &&
1738 MI->getOperand(2).getReg() == ARM::PC &&
1739 MI->getNumExplicitOperands() == 3) {
1740 // Create the new insn and copy the predicate from the old.
1741 BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
1742 .add(MI->getOperand(0))
1743 .add(MI->getOperand(1));
1744 MI->eraseFromParent();
1745 MadeChange = true;
1746 } else if (MI->getOpcode() == ARM::tPUSH &&
1747 MI->getOperand(2).getReg() == ARM::LR &&
1748 MI->getNumExplicitOperands() == 3) {
1749 // Just remove the push.
1750 MI->eraseFromParent();
1751 MadeChange = true;
1754 return MadeChange;
1757 bool ARMConstantIslands::optimizeThumb2Instructions() {
1758 bool MadeChange = false;
1760 // Shrink ADR and LDR from constantpool.
1761 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
1762 CPUser &U = CPUsers[i];
1763 unsigned Opcode = U.MI->getOpcode();
1764 unsigned NewOpc = 0;
1765 unsigned Scale = 1;
1766 unsigned Bits = 0;
1767 switch (Opcode) {
1768 default: break;
1769 case ARM::t2LEApcrel:
1770 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1771 NewOpc = ARM::tLEApcrel;
1772 Bits = 8;
1773 Scale = 4;
1775 break;
1776 case ARM::t2LDRpci:
1777 if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
1778 NewOpc = ARM::tLDRpci;
1779 Bits = 8;
1780 Scale = 4;
1782 break;
1785 if (!NewOpc)
1786 continue;
1788 unsigned UserOffset = getUserOffset(U);
1789 unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
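// For the tLEApcrel/tLDRpci forms above (Bits = 8, Scale = 4) this gives
// 255 * 4 = 1020 bytes of forward reach.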
1791 // Be conservative with inline asm.
1792 if (!U.KnownAlignment)
1793 MaxOffs -= 2;
1795 // FIXME: Check if offset is multiple of scale if scale is not 4.
1796 if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
1797 LLVM_DEBUG(dbgs() << "Shrink: " << *U.MI);
1798 U.MI->setDesc(TII->get(NewOpc));
1799 MachineBasicBlock *MBB = U.MI->getParent();
1800 BBUtils->adjustBBSize(MBB, -2);
1801 BBUtils->adjustBBOffsetsAfter(MBB);
1802 ++NumT2CPShrunk;
1803 MadeChange = true;
1807 return MadeChange;
1810 bool ARMConstantIslands::optimizeThumb2Branches() {
1811 bool MadeChange = false;
1813 // The order in which branches appear in ImmBranches is approximately their
1814 // order within the function body. By visiting later branches first, we reduce
1815 // the distance between earlier forward branches and their targets, making it
1816 // more likely that the cbn?z optimization, which can only apply to forward
1817 // branches, will succeed.
1818 for (unsigned i = ImmBranches.size(); i != 0; --i) {
1819 ImmBranch &Br = ImmBranches[i-1];
1820 unsigned Opcode = Br.MI->getOpcode();
1821 unsigned NewOpc = 0;
1822 unsigned Scale = 1;
1823 unsigned Bits = 0;
1824 switch (Opcode) {
1825 default: break;
1826 case ARM::t2B:
1827 NewOpc = ARM::tB;
1828 Bits = 11;
1829 Scale = 2;
1830 break;
1831 case ARM::t2Bcc:
1832 NewOpc = ARM::tBcc;
1833 Bits = 8;
1834 Scale = 2;
1835 break;
1837 if (NewOpc) {
1838 unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
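// E.g. tB (Bits = 11, Scale = 2) gives 1023 * 2 = 2046 bytes, and tBcc
// (Bits = 8, Scale = 2) gives 127 * 2 = 254 bytes.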
1839 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1840 if (BBUtils->isBBInRange(Br.MI, DestBB, MaxOffs)) {
1841 LLVM_DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
1842 Br.MI->setDesc(TII->get(NewOpc));
1843 MachineBasicBlock *MBB = Br.MI->getParent();
1844 BBUtils->adjustBBSize(MBB, -2);
1845 BBUtils->adjustBBOffsetsAfter(MBB);
1846 ++NumT2BrShrunk;
1847 MadeChange = true;
1851 Opcode = Br.MI->getOpcode();
1852 if (Opcode != ARM::tBcc)
1853 continue;
1855 // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
1856 // so this transformation is not safe.
1857 if (!Br.MI->killsRegister(ARM::CPSR))
1858 continue;
1860 NewOpc = 0;
1861 unsigned PredReg = 0;
1862 ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
1863 if (Pred == ARMCC::EQ)
1864 NewOpc = ARM::tCBZ;
1865 else if (Pred == ARMCC::NE)
1866 NewOpc = ARM::tCBNZ;
1867 if (!NewOpc)
1868 continue;
1869 MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
1870 // Check if the distance is within 126. Subtract 2 from the starting offset
1871 // because the cmp will be eliminated.
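// (CB(N)Z encodes a 6-bit immediate scaled by 2, giving forward offsets of
// 0..126 bytes only, which is why backward branches are rejected here.)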
1872 unsigned BrOffset = BBUtils->getOffsetOf(Br.MI) + 4 - 2;
1873 BBInfoVector &BBInfo = BBUtils->getBBInfo();
1874 unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
1875 if (BrOffset >= DestOffset || (DestOffset - BrOffset) > 126)
1876 continue;
1878 // Search backwards to find a tCMPi8
1879 auto *TRI = STI->getRegisterInfo();
1880 MachineInstr *CmpMI = findCMPToFoldIntoCBZ(Br.MI, TRI);
1881 if (!CmpMI || CmpMI->getOpcode() != ARM::tCMPi8)
1882 continue;
1884 Register Reg = CmpMI->getOperand(0).getReg();
1886 // Check for Kill flags on Reg. If they are present remove them and set kill
1887 // on the new CBZ.
1888 MachineBasicBlock::iterator KillMI = Br.MI;
1889 bool RegKilled = false;
1890 do {
1891 --KillMI;
1892 if (KillMI->killsRegister(Reg, TRI)) {
1893 KillMI->clearRegisterKills(Reg, TRI);
1894 RegKilled = true;
1895 break;
1897 } while (KillMI != CmpMI);
1899 // Create the new CBZ/CBNZ
1900 MachineBasicBlock *MBB = Br.MI->getParent();
1901 LLVM_DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
1902 MachineInstr *NewBR =
1903 BuildMI(*MBB, Br.MI, Br.MI->getDebugLoc(), TII->get(NewOpc))
1904 .addReg(Reg, getKillRegState(RegKilled))
1905 .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
1906 CmpMI->eraseFromParent();
1907 Br.MI->eraseFromParent();
1908 Br.MI = NewBR;
1909 BBInfo[MBB->getNumber()].Size -= 2;
1910 BBUtils->adjustBBOffsetsAfter(MBB);
1911 ++NumCBZ;
1912 MadeChange = true;
1915 return MadeChange;
1918 static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
1919 unsigned BaseReg) {
1920 if (I.getOpcode() != ARM::t2ADDrs)
1921 return false;
1923 if (I.getOperand(0).getReg() != EntryReg)
1924 return false;
1926 if (I.getOperand(1).getReg() != BaseReg)
1927 return false;
1929 // FIXME: what about CC and IdxReg?
1930 return true;
1933 /// While trying to form a TBB/TBH instruction, we may (if the table
1934 /// doesn't immediately follow the BR_JT) need access to the start of the
1935 /// jump-table. We know one instruction that produces such a register; this
1936 /// function works out whether that definition can be preserved to the BR_JT,
1937 /// possibly by removing an intervening addition (which is usually needed to
1938 /// calculate the actual entry to jump to).
1939 bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
1940 MachineInstr *LEAMI,
1941 unsigned &DeadSize,
1942 bool &CanDeleteLEA,
1943 bool &BaseRegKill) {
1944 if (JumpMI->getParent() != LEAMI->getParent())
1945 return false;
1947 // Now we hope that we have at least these instructions in the basic block:
1948 // BaseReg = t2LEA ...
1949 // [...]
1950 // EntryReg = t2ADDrs BaseReg, ...
1951 // [...]
1952 // t2BR_JT EntryReg
1954 // We have to be very conservative about what we recognise here though. The
1955 // main perturbing factors to watch out for are:
1956 // + Spills at any point in the chain: not direct problems but we would
1957 // expect a blocking Def of the spilled register so in practice what we
1958 // can do is limited.
1959 // + EntryReg == BaseReg: this is the one situation we should allow a Def
1960 // of BaseReg, but only if the t2ADDrs can be removed.
1961 // + Some instruction other than t2ADDrs computing the entry. Not seen in
1962 // the wild, but we should be careful.
1963 Register EntryReg = JumpMI->getOperand(0).getReg();
1964 Register BaseReg = LEAMI->getOperand(0).getReg();
1966 CanDeleteLEA = true;
1967 BaseRegKill = false;
1968 MachineInstr *RemovableAdd = nullptr;
1969 MachineBasicBlock::iterator I(LEAMI);
1970 for (++I; &*I != JumpMI; ++I) {
1971 if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
1972 RemovableAdd = &*I;
1973 break;
1976 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
1977 const MachineOperand &MO = I->getOperand(K);
1978 if (!MO.isReg() || !MO.getReg())
1979 continue;
1980 if (MO.isDef() && MO.getReg() == BaseReg)
1981 return false;
1982 if (MO.isUse() && MO.getReg() == BaseReg) {
1983 BaseRegKill = BaseRegKill || MO.isKill();
1984 CanDeleteLEA = false;
1989 if (!RemovableAdd)
1990 return true;
1992 // Check the add really is removable, and that nothing else in the block
1993 // clobbers BaseReg.
1994 for (++I; &*I != JumpMI; ++I) {
1995 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
1996 const MachineOperand &MO = I->getOperand(K);
1997 if (!MO.isReg() || !MO.getReg())
1998 continue;
1999 if (MO.isDef() && MO.getReg() == BaseReg)
2000 return false;
2001 if (MO.isUse() && MO.getReg() == EntryReg)
2002 RemovableAdd = nullptr;
2006 if (RemovableAdd) {
2007 RemovableAdd->eraseFromParent();
2008 DeadSize += isThumb2 ? 4 : 2;
2009 } else if (BaseReg == EntryReg) {
2010 // The add wasn't removable, but clobbered the base for the TBB. So we can't
2011 // preserve it.
2012 return false;
2015 // We reached the end of the block without seeing another definition of
2016 // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2017 // used in the TBB/TBH if necessary.
2018 return true;
2021 /// Returns whether CPEMI is the first instruction in the block
2022 /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2023 /// we can switch the first register to PC and usually remove the address
2024 /// calculation that preceded it.
2025 static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2026 MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
2027 MachineFunction *MF = MBB->getParent();
2028 ++MBB;
2030 return MBB != MF->end() && MBB->begin() != MBB->end() &&
2031 &*MBB->begin() == CPEMI;
2034 static void RemoveDeadAddBetweenLEAAndJT(MachineInstr *LEAMI,
2035 MachineInstr *JumpMI,
2036 unsigned &DeadSize) {
2037 // Remove a dead add between the LEA and JT, which used to compute EntryReg,
2038 // but the JT now uses PC. Finds the last ADD (if any) that def's EntryReg
2039 // and is not clobbered / used.
2040 MachineInstr *RemovableAdd = nullptr;
2041 Register EntryReg = JumpMI->getOperand(0).getReg();
2043 // Find the last ADD to set EntryReg
2044 MachineBasicBlock::iterator I(LEAMI);
2045 for (++I; &*I != JumpMI; ++I) {
2046 if (I->getOpcode() == ARM::t2ADDrs && I->getOperand(0).getReg() == EntryReg)
2047 RemovableAdd = &*I;
2050 if (!RemovableAdd)
2051 return;
2053 // Ensure EntryReg is not clobbered or used.
2054 MachineBasicBlock::iterator J(RemovableAdd);
2055 for (++J; &*J != JumpMI; ++J) {
2056 for (unsigned K = 0, E = J->getNumOperands(); K != E; ++K) {
2057 const MachineOperand &MO = J->getOperand(K);
2058 if (!MO.isReg() || !MO.getReg())
2059 continue;
2060 if (MO.isDef() && MO.getReg() == EntryReg)
2061 return;
2062 if (MO.isUse() && MO.getReg() == EntryReg)
2063 return;
2067 LLVM_DEBUG(dbgs() << "Removing Dead Add: " << *RemovableAdd);
2068 RemovableAdd->eraseFromParent();
2069 DeadSize += 4;
2072 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
2073 /// jumptables when it's possible.
2074 bool ARMConstantIslands::optimizeThumb2JumpTables() {
2075 bool MadeChange = false;
2077 // FIXME: After the tables are shrunk, can we get rid of some of the
2078 // constantpool tables?
2079 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2080 if (!MJTI) return false;
2082 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2083 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2084 MachineInstr *MI = T2JumpTables[i];
2085 const MCInstrDesc &MCID = MI->getDesc();
2086 unsigned NumOps = MCID.getNumOperands();
2087 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2088 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2089 unsigned JTI = JTOP.getIndex();
2090 assert(JTI < JT.size());
2092 bool ByteOk = true;
2093 bool HalfWordOk = true;
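// TBB stores byte-sized table entries and TBH halfword-sized ones; both are
// scaled by 2, so the maximum forward displacement is 255 * 2 = 510 bytes for
// TBB and 65535 * 2 = 131070 bytes for TBH, matching the limits checked below.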
2094 unsigned JTOffset = BBUtils->getOffsetOf(MI) + 4;
2095 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2096 BBInfoVector &BBInfo = BBUtils->getBBInfo();
2097 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2098 MachineBasicBlock *MBB = JTBBs[j];
2099 unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
2100 // Negative offset is not ok. FIXME: We should change BB layout to make
2101 // sure all the branches are forward.
2102 if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
2103 ByteOk = false;
2104 unsigned TBHLimit = ((1<<16)-1)*2;
2105 if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
2106 HalfWordOk = false;
2107 if (!ByteOk && !HalfWordOk)
2108 break;
2111 if (!ByteOk && !HalfWordOk)
2112 continue;
2114 CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2115 MachineBasicBlock *MBB = MI->getParent();
2116 if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2117 continue;
2119 unsigned DeadSize = 0;
2120 bool CanDeleteLEA = false;
2121 bool BaseRegKill = false;
2123 unsigned IdxReg = ~0U;
2124 bool IdxRegKill = true;
2125 if (isThumb2) {
2126 IdxReg = MI->getOperand(1).getReg();
2127 IdxRegKill = MI->getOperand(1).isKill();
2129 bool PreservedBaseReg =
2130 preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2131 if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2132 continue;
2133 } else {
2134 // We're in thumb-1 mode, so we must have something like:
2135 // %idx = tLSLri %idx, 2
2136 // %base = tLEApcrelJT
2137 // %t = tLDRr %base, %idx
2138 Register BaseReg = User.MI->getOperand(0).getReg();
2140 if (User.MI->getIterator() == User.MI->getParent()->begin())
2141 continue;
2142 MachineInstr *Shift = User.MI->getPrevNode();
2143 if (Shift->getOpcode() != ARM::tLSLri ||
2144 Shift->getOperand(3).getImm() != 2 ||
2145 !Shift->getOperand(2).isKill())
2146 continue;
2147 IdxReg = Shift->getOperand(2).getReg();
2148 Register ShiftedIdxReg = Shift->getOperand(0).getReg();
2150 // It's important that IdxReg is live until the actual TBB/TBH. Most of
2151 // the range is checked later, but the LEA might still clobber it and not
2152 // actually get removed.
2153 if (BaseReg == IdxReg && !jumpTableFollowsTB(MI, User.CPEMI))
2154 continue;
2156 MachineInstr *Load = User.MI->getNextNode();
2157 if (Load->getOpcode() != ARM::tLDRr)
2158 continue;
2159 if (Load->getOperand(1).getReg() != BaseReg ||
2160 Load->getOperand(2).getReg() != ShiftedIdxReg ||
2161 !Load->getOperand(2).isKill())
2162 continue;
2164 // If we're in PIC mode, there should be another ADD following.
2165 auto *TRI = STI->getRegisterInfo();
2167 // %base cannot be redefined after the load as it will appear before
2168 // TBB/TBH like:
2169 // %base =
2170 // %base =
2171 // tBB %base, %idx
2172 if (registerDefinedBetween(BaseReg, Load->getNextNode(), MBB->end(), TRI))
2173 continue;
2175 if (isPositionIndependentOrROPI) {
2176 MachineInstr *Add = Load->getNextNode();
2177 if (Add->getOpcode() != ARM::tADDrr ||
2178 Add->getOperand(2).getReg() != BaseReg ||
2179 Add->getOperand(3).getReg() != Load->getOperand(0).getReg() ||
2180 !Add->getOperand(3).isKill())
2181 continue;
2182 if (Add->getOperand(0).getReg() != MI->getOperand(0).getReg())
2183 continue;
2184 if (registerDefinedBetween(IdxReg, Add->getNextNode(), MI, TRI))
2185 // IdxReg gets redefined in the middle of the sequence.
2186 continue;
2187 Add->eraseFromParent();
2188 DeadSize += 2;
2189 } else {
2190 if (Load->getOperand(0).getReg() != MI->getOperand(0).getReg())
2191 continue;
2192 if (registerDefinedBetween(IdxReg, Load->getNextNode(), MI, TRI))
2193 // IdxReg gets redefined in the middle of the sequence.
2194 continue;
2197 // Now safe to delete the load and lsl. The LEA will be removed later.
2198 CanDeleteLEA = true;
2199 Shift->eraseFromParent();
2200 Load->eraseFromParent();
2201 DeadSize += 4;
2204 LLVM_DEBUG(dbgs() << "Shrink JT: " << *MI);
2205 MachineInstr *CPEMI = User.CPEMI;
2206 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2207 if (!isThumb2)
2208 Opc = ByteOk ? ARM::tTBB_JT : ARM::tTBH_JT;
2210 MachineBasicBlock::iterator MI_JT = MI;
2211 MachineInstr *NewJTMI =
2212 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
2213 .addReg(User.MI->getOperand(0).getReg(),
2214 getKillRegState(BaseRegKill))
2215 .addReg(IdxReg, getKillRegState(IdxRegKill))
2216 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2217 .addImm(CPEMI->getOperand(0).getImm());
2218 LLVM_DEBUG(dbgs() << printMBBReference(*MBB) << ": " << *NewJTMI);
2220 unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2221 CPEMI->setDesc(TII->get(JTOpc));
2223 if (jumpTableFollowsTB(MI, User.CPEMI)) {
2224 NewJTMI->getOperand(0).setReg(ARM::PC);
2225 NewJTMI->getOperand(0).setIsKill(false);
2227 if (CanDeleteLEA) {
2228 if (isThumb2)
2229 RemoveDeadAddBetweenLEAAndJT(User.MI, MI, DeadSize);
2231 User.MI->eraseFromParent();
2232 DeadSize += isThumb2 ? 4 : 2;
2234 // The LEA was eliminated; the TBB instruction becomes the only new user
2235 // of the jump table.
2236 User.MI = NewJTMI;
2237 User.MaxDisp = 4;
2238 User.NegOk = false;
2239 User.IsSoImm = false;
2240 User.KnownAlignment = false;
2241 } else {
2242 // The LEA couldn't be eliminated, so we must add another CPUser to
2243 // record the TBB or TBH use.
2244 int CPEntryIdx = JumpTableEntryIndices[JTI];
2245 auto &CPEs = CPEntries[CPEntryIdx];
2246 auto Entry =
2247 find_if(CPEs, [&](CPEntry &E) { return E.CPEMI == User.CPEMI; });
2248 ++Entry->RefCount;
2249 CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2253 unsigned NewSize = TII->getInstSizeInBytes(*NewJTMI);
2254 unsigned OrigSize = TII->getInstSizeInBytes(*MI);
2255 MI->eraseFromParent();
2257 int Delta = OrigSize - NewSize + DeadSize;
2258 BBInfo[MBB->getNumber()].Size -= Delta;
2259 BBUtils->adjustBBOffsetsAfter(MBB);
2261 ++NumTBs;
2262 MadeChange = true;
2265 return MadeChange;
2268 /// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
2269 /// jump tables always branch forwards, since that's what tbb and tbh need.
2270 bool ARMConstantIslands::reorderThumb2JumpTables() {
2271 bool MadeChange = false;
2273 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
2274 if (!MJTI) return false;
2276 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
2277 for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
2278 MachineInstr *MI = T2JumpTables[i];
2279 const MCInstrDesc &MCID = MI->getDesc();
2280 unsigned NumOps = MCID.getNumOperands();
2281 unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
2282 MachineOperand JTOP = MI->getOperand(JTOpIdx);
2283 unsigned JTI = JTOP.getIndex();
2284 assert(JTI < JT.size());
2286 // We prefer that target blocks for the jump table come after the jump
2287 // instruction so we can use TB[BH]. Loop through the target blocks
2288 // and try to adjust them so that this holds.
2289 int JTNumber = MI->getParent()->getNumber();
2290 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
2291 for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
2292 MachineBasicBlock *MBB = JTBBs[j];
2293 int DTNumber = MBB->getNumber();
2295 if (DTNumber < JTNumber) {
2296 // The destination precedes the switch. Try to move the block forward
2297 // so we have a positive offset.
2298 MachineBasicBlock *NewBB =
2299 adjustJTTargetBlockForward(MBB, MI->getParent());
2300 if (NewBB)
2301 MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
2302 MadeChange = true;
2307 return MadeChange;
2310 MachineBasicBlock *ARMConstantIslands::
2311 adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
2312 // If the destination block is terminated by an unconditional branch,
2313 // try to move it; otherwise, create a new block following the jump
2314 // table that branches back to the actual target. This is a very simple
2315 // heuristic. FIXME: We can definitely improve it.
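// In the fallback case the new block acts as a trampoline: it contains a
// single unconditional branch back to the original target block.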
2316 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
2317 SmallVector<MachineOperand, 4> Cond;
2318 SmallVector<MachineOperand, 4> CondPrior;
2319 MachineFunction::iterator BBi = BB->getIterator();
2320 MachineFunction::iterator OldPrior = std::prev(BBi);
2322 // If the block terminator isn't analyzable, don't try to move the block
2323 bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);
2325 // If the block ends in an unconditional branch, move it. The prior block
2326 // has to have an analyzable terminator for us to move this one. Be paranoid
2327 // and make sure we're not trying to move the entry block of the function.
2328 if (!B && Cond.empty() && BB != &MF->front() &&
2329 !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
2330 BB->moveAfter(JTBB);
2331 OldPrior->updateTerminator();
2332 BB->updateTerminator();
2333 // Update numbering to account for the block being moved.
2334 MF->RenumberBlocks();
2335 ++NumJTMoved;
2336 return nullptr;
2339 // Create a new MBB for the code after the jump BB.
2340 MachineBasicBlock *NewBB =
2341 MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
2342 MachineFunction::iterator MBBI = ++JTBB->getIterator();
2343 MF->insert(MBBI, NewBB);
2345 // Copy live-in information to new block.
2346 for (const MachineBasicBlock::RegisterMaskPair &RegMaskPair : BB->liveins())
2347 NewBB->addLiveIn(RegMaskPair);
2349 // Add an unconditional branch from NewBB to BB.
2350 // There doesn't seem to be meaningful DebugInfo available; this doesn't
2351 // correspond directly to anything in the source.
2352 if (isThumb2)
2353 BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B))
2354 .addMBB(BB)
2355 .add(predOps(ARMCC::AL));
2356 else
2357 BuildMI(NewBB, DebugLoc(), TII->get(ARM::tB))
2358 .addMBB(BB)
2359 .add(predOps(ARMCC::AL));
2361 // Update internal data structures to account for the newly inserted MBB.
2362 MF->RenumberBlocks(NewBB);
2364 // Update the CFG.
2365 NewBB->addSuccessor(BB);
2366 JTBB->replaceSuccessor(BB, NewBB);
2368 ++NumJTInserted;
2369 return NewBB;
2372 /// createARMConstantIslandPass - returns an instance of the constpool
2373 /// island pass.
2374 FunctionPass *llvm::createARMConstantIslandPass() {
2375 return new ARMConstantIslands();
2378 INITIALIZE_PASS(ARMConstantIslands, "arm-cp-islands", ARM_CP_ISLANDS_OPT_NAME,
2379 false, false)