//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM, const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;
  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;
  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;
  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
  struct OpcodeCache {
    unsigned SEXT16, ZEXT16;
    unsigned SEXT8, ZEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND, RSB;

    unsigned STORE32, LOAD32;
    unsigned STORE16, LOAD16;
    unsigned STORE8, LOAD8;

    unsigned ADDrr, ADDri;
    unsigned CMPrr, MOVi, MOVCCi, MOVCCr;
    unsigned TSTri, Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;
  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

  // We declare the temporaries used by selectImpl() in the class to minimize the
  // cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
} // end namespace llvm
const unsigned zero_reg = 0;

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI), STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
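
// Pick a concrete register class for a virtual register, based on its register
// bank and the size of its type.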
static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg))
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}
static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
  unsigned VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  unsigned VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  unsigned VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}
static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // scalar GPRs.
  unsigned VReg0 = MIB->getOperand(0).getReg();
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  unsigned VReg1 = MIB->getOperand(1).getReg();
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  unsigned VReg2 = MIB->getOperand(2).getReg();
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}
ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;
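
  // Pick either the Thumb2 (t2-prefixed) or the ARM encoding of each cached
  // opcode, depending on the subtarget.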
#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special...
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);
  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);
  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}
unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}
unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}
// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
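  // Only FCMP_ONE and FCMP_UEQ need a second comparison; every other predicate
  // maps to a single ARM condition code in Preds.first.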
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}
struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};
struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};
void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}
bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}
bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}
bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB->getOperand(0).getReg();
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB->getOperand(2).getReg();
  auto RHSReg = MIB->getOperand(3).getReg();
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
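  // Each insertComparison ORs its result into the previous one, so seed the
  // chain with a register that is known to hold 0.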
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}
bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}
bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

  bool UseMovt = STI.useMovt();

  unsigned Size = TM.getPointerSize(0);
  unsigned Alignment = 4;
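
  // Helper that appends the constant pool index, the memory operand and the
  // predicate to a load from the constant pool, using either a plain entry for
  // GV or an SB-relative one.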
  auto addOpsForConstantPoolLoad = [&MF, Alignment,
                                    Size](MachineInstrBuilder &MIB,
                                          const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            Size, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };
  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);
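
    // For an indirect access, either the pseudo already folds in the GOT load,
    // or we must emit a separate LDR from the address it computes.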
    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB->getOperand(0).getReg();
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addUse(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->RemoveOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->RemoveOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB->getOperand(1).getReg();
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB->getOperand(0).getReg();
  auto TrueReg = MIB->getOperand(2).getReg();
  auto FalseReg = MIB->getOperand(3).getReg();
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}
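
// Lower a generic shift to MOVsr, with the kind of shift (lsl/lsr/asr) passed
// as an immediate operand.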
bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}
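
// Custom renderers: re-encode a G_FCONSTANT operand into the encoded VFP
// immediate form (ARM_AM::getFP32Imm / ARM_AM::getFP64Imm).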
void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}
bool ARMInstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  using namespace TargetOpcode;
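
  // Give the TableGen'erated selector a shot first; fall back to the manual
  // selection code below for anything it cannot handle.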
  if (selectImpl(I, CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    LLVM_FALLTHROUGH;
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt boils down to & 0x1; for SExt we also subtract that from 0
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        unsigned SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        unsigned AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      unsigned IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    unsigned Alignment = Size;

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->RemoveOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    unsigned OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL:
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  case G_GEP:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user.
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    unsigned Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    if (ValSize == 1 && NewOpc == Opcodes.STORE8) {
      // Before storing a 1-bit value, make sure to clear out any unneeded bits.
      unsigned OriginalValue = I.getOperand(0).getReg();

      unsigned ValueToStore = MRI.createVirtualRegister(&ARM::GPRRegClass);
      I.getOperand(0).setReg(ValueToStore);

      auto InsertBefore = I.getIterator();
      auto AndI = BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.AND))
                      .addDef(ValueToStore)
                      .addUse(OriginalValue)
                      .addImm(1)
                      .add(predOps(ARMCC::AL))
                      .add(condCodeOp());
      if (!constrainSelectedInstRegOperands(*AndI, TII, TRI, RBI))
        return false;
    }

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // LDRH has a funny addressing mode (there's already a FIXME for it).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }
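
    // Set the CPSR flags by testing the low bit of the condition register,
    // then branch if it was set (i.e. the condition was true).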
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
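  // A G_PHI becomes a plain PHI; all we need to do is pin its destination to a
  // register class matching its register bank.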
  case G_PHI: {
    I.setDesc(TII.get(PHI));

    unsigned DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      break;
    }

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}