1 //===-- ARMSubtarget.cpp - ARM Subtarget Information ----------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the ARM specific subclass of TargetSubtargetInfo.
11 //===----------------------------------------------------------------------===//
15 #include "ARMCallLowering.h"
16 #include "ARMFrameLowering.h"
17 #include "ARMInstrInfo.h"
18 #include "ARMLegalizerInfo.h"
19 #include "ARMRegisterBankInfo.h"
20 #include "ARMSubtarget.h"
21 #include "ARMTargetMachine.h"
22 #include "MCTargetDesc/ARMMCTargetDesc.h"
23 #include "Thumb1FrameLowering.h"
24 #include "Thumb1InstrInfo.h"
25 #include "Thumb2InstrInfo.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/MC/MCAsmInfo.h"
34 #include "llvm/MC/MCTargetOptions.h"
35 #include "llvm/Support/CodeGen.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/TargetParser/ARMTargetParser.h"
39 #include "llvm/TargetParser/Triple.h"
43 #define DEBUG_TYPE "arm-subtarget"
// Pull in the TableGen-generated subtarget description and the generated
// constructor body for ARMGenSubtargetInfo.
45 #define GET_SUBTARGETINFO_TARGET_DESC
46 #define GET_SUBTARGETINFO_CTOR
47 #include "ARMGenSubtargetInfo.inc"
50 UseFusedMulOps("arm-use-mulops",
51 cl::init(true), cl::Hidden
);
58 static cl::opt
<ITMode
>
59 IT(cl::desc("IT block support"), cl::Hidden
, cl::init(DefaultIT
),
60 cl::values(clEnumValN(DefaultIT
, "arm-default-it",
61 "Generate any type of IT block"),
62 clEnumValN(RestrictedIT
, "arm-restrict-it",
63 "Disallow complex IT blocks")));
65 /// ForceFastISel - Use the fast-isel, even for subtargets where it is not
66 /// currently supported (for testing only).
68 ForceFastISel("arm-force-fast-isel",
69 cl::init(false), cl::Hidden
);
71 /// initializeSubtargetDependencies - Initializes using a CPU and feature string
72 /// so that we can use initializer lists for subtarget initialization.
73 ARMSubtarget
&ARMSubtarget::initializeSubtargetDependencies(StringRef CPU
,
75 initializeEnvironment();
76 initSubtargetFeatures(CPU
, FS
);
80 ARMFrameLowering
*ARMSubtarget::initializeFrameLowering(StringRef CPU
,
82 ARMSubtarget
&STI
= initializeSubtargetDependencies(CPU
, FS
);
83 if (STI
.isThumb1Only())
84 return (ARMFrameLowering
*)new Thumb1FrameLowering(STI
);
86 return new ARMFrameLowering(STI
);
89 ARMSubtarget::ARMSubtarget(const Triple
&TT
, const std::string
&CPU
,
90 const std::string
&FS
,
91 const ARMBaseTargetMachine
&TM
, bool IsLittle
,
93 : ARMGenSubtargetInfo(TT
, CPU
, /*TuneCPU*/ CPU
, FS
),
94 UseMulOps(UseFusedMulOps
), CPUString(CPU
), OptMinSize(MinSize
),
95 IsLittle(IsLittle
), TargetTriple(TT
), Options(TM
.Options
), TM(TM
),
96 FrameLowering(initializeFrameLowering(CPU
, FS
)),
97 // At this point initializeSubtargetDependencies has been called so
98 // we can query directly.
99 InstrInfo(isThumb1Only()
100 ? (ARMBaseInstrInfo
*)new Thumb1InstrInfo(*this)
102 ? (ARMBaseInstrInfo
*)new ARMInstrInfo(*this)
103 : (ARMBaseInstrInfo
*)new Thumb2InstrInfo(*this)),
106 CallLoweringInfo
.reset(new ARMCallLowering(*getTargetLowering()));
107 Legalizer
.reset(new ARMLegalizerInfo(*this));
109 auto *RBI
= new ARMRegisterBankInfo(*getRegisterInfo());
111 // FIXME: At this point, we can't rely on Subtarget having RBI.
112 // It's awkward to mix passing RBI and the Subtarget; should we pass
114 InstSelector
.reset(createARMInstructionSelector(TM
, *this, *RBI
));
116 RegBankInfo
.reset(RBI
);
119 const CallLowering
*ARMSubtarget::getCallLowering() const {
120 return CallLoweringInfo
.get();
123 InstructionSelector
*ARMSubtarget::getInstructionSelector() const {
124 return InstSelector
.get();
127 const LegalizerInfo
*ARMSubtarget::getLegalizerInfo() const {
128 return Legalizer
.get();
131 const RegisterBankInfo
*ARMSubtarget::getRegBankInfo() const {
132 return RegBankInfo
.get();
135 bool ARMSubtarget::isXRaySupported() const {
136 // We don't currently suppport Thumb, but Windows requires Thumb.
137 return hasV6Ops() && hasARMOps() && !isTargetWindows();
140 void ARMSubtarget::initializeEnvironment() {
141 // MCAsmInfo isn't always present (e.g. in opt) so we can't initialize this
142 // directly from it, but we can try to make sure they're consistent when both
144 UseSjLjEH
= (isTargetDarwin() && !isTargetWatchABI() &&
145 Options
.ExceptionModel
== ExceptionHandling::None
) ||
146 Options
.ExceptionModel
== ExceptionHandling::SjLj
;
147 assert((!TM
.getMCAsmInfo() ||
148 (TM
.getMCAsmInfo()->getExceptionHandlingType() ==
149 ExceptionHandling::SjLj
) == UseSjLjEH
) &&
150 "inconsistent sjlj choice between CodeGen and MC");
153 void ARMSubtarget::initSubtargetFeatures(StringRef CPU
, StringRef FS
) {
// NOTE(review): this function appears truncated by extraction — several
// statements seem to be missing (e.g. `CPUString = "swift";` under the ARMV7S
// branch, closing braces, the `if (!FS.empty())` structure around the ArchFS
// logic, the NoARM/isAAPCS_ABI stack-alignment branches, the switch(IT)
// handling, and assignments guarded by feature checks). Verify against the
// upstream ARMSubtarget.cpp before editing.
154 if (CPUString
.empty()) {
155 CPUString
= "generic";
157 if (isTargetDarwin()) {
158 StringRef ArchName
= TargetTriple
.getArchName();
159 ARM::ArchKind AK
= ARM::parseArch(ArchName
);
160 if (AK
== ARM::ArchKind::ARMV7S
)
161 // Default to the Swift CPU when targeting armv7s/thumbv7s.
163 else if (AK
== ARM::ArchKind::ARMV7K
)
164 // Default to the Cortex-a7 CPU when targeting armv7k/thumbv7k.
165 // ARMv7k does not use SjLj exception handling.
166 CPUString
= "cortex-a7";
170 // Insert the architecture feature derived from the target triple into the
171 // feature string. This is important for setting features that are implied
172 // based on the architecture version.
173 std::string ArchFS
= ARM_MC::ParseARMTriple(TargetTriple
, CPUString
);
176 ArchFS
= (Twine(ArchFS
) + "," + FS
).str();
178 ArchFS
= std::string(FS
);
180 ParseSubtargetFeatures(CPUString
, /*TuneCPU*/ CPUString
, ArchFS
);
182 // FIXME: This used enable V6T2 support implicitly for Thumb2 mode.
183 // Assert this for now to make the change obvious.
184 assert(hasV6T2Ops() || !hasThumb2());
186 if (genExecuteOnly()) {
187 // Execute only support for >= v8-M Baseline requires movt support
188 if (hasV8MBaselineOps())
191 report_fatal_error("Cannot generate execute-only code for this target");
194 // Keep a pointer to static instruction cost data for the specified CPU.
195 SchedModel
= getSchedModelForCPU(CPUString
);
197 // Initialize scheduling itinerary for the specified CPU.
198 InstrItins
= getInstrItineraryForCPU(CPUString
);
200 // FIXME: this is invalid for WindowsCE
201 if (isTargetWindows())
205 stackAlignment
= Align(8);
206 if (isTargetNaCl() || isAAPCS16_ABI())
207 stackAlignment
= Align(16);
209 // FIXME: Completely disable sibcall for Thumb1 since ThumbRegisterInfo::
210 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
211 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
212 // support in the assembler and linker to be used. This would need to be
213 // fixed to fully support tail calls in Thumb1.
215 // For ARMv8-M, we /do/ implement tail calls. Doing this is tricky for v8-M
216 // baseline, since the LDM/POP instruction on Thumb doesn't take LR. This
217 // means if we need to reload LR, it takes extra instructions, which outweighs
218 // the value of the tail call; but here we don't know yet whether LR is going
219 // to be used. We take the optimistic approach of generating the tail call and
220 // perhaps taking a hit if we need to restore the LR.
222 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
223 // but we need to make sure there are enough registers; the only valid
224 // registers are the 4 used for parameters. We don't currently do this
227 SupportsTailCall
= !isThumb1Only() || hasV8MBaselineOps();
229 if (isTargetMachO() && isTargetIOS() && getTargetTriple().isOSVersionLT(5, 0))
230 SupportsTailCall
= false;
241 // NEON f32 ops are non-IEEE 754 compliant. Darwin is ok with it by default.
242 const FeatureBitset
&Bits
= getFeatureBits();
243 if ((Bits
[ARM::ProcA5
] || Bits
[ARM::ProcA8
]) && // Where this matters
244 (Options
.UnsafeFPMath
|| isTargetDarwin()))
250 // If MVEVectorCostFactor is still 0 (has not been set to anything else), default it to 2
251 if (MVEVectorCostFactor
== 0)
252 MVEVectorCostFactor
= 2;
// NOTE(review): the switch below is missing all of its `case` labels and
// `break` statements (extraction dropped them); only the per-CPU tuning
// assignments survived. Do not edit without restoring the labels from
// upstream.
254 // FIXME: Teach TableGen to deal with these instead of doing it manually here.
255 switch (ARMProcFamily
) {
260 LdStMultipleTiming
= DoubleIssue
;
263 LdStMultipleTiming
= DoubleIssue
;
266 LdStMultipleTiming
= DoubleIssueCheckUnalignedAccess
;
267 PreISelOperandLatencyAdjustment
= 1;
272 MaxInterleaveFactor
= 2;
273 PreISelOperandLatencyAdjustment
= 1;
274 PartialUpdateClearance
= 12;
305 LdStMultipleTiming
= SingleIssuePlusExtras
;
306 MaxInterleaveFactor
= 4;
308 PreferBranchLogAlignment
= 3;
313 PreISelOperandLatencyAdjustment
= 1;
318 MaxInterleaveFactor
= 2;
319 LdStMultipleTiming
= SingleIssuePlusExtras
;
320 PreISelOperandLatencyAdjustment
= 1;
321 PartialUpdateClearance
= 12;
326 bool ARMSubtarget::isTargetHardFloat() const { return TM
.isTargetHardFloat(); }
328 bool ARMSubtarget::isAPCS_ABI() const {
329 assert(TM
.TargetABI
!= ARMBaseTargetMachine::ARM_ABI_UNKNOWN
);
330 return TM
.TargetABI
== ARMBaseTargetMachine::ARM_ABI_APCS
;
332 bool ARMSubtarget::isAAPCS_ABI() const {
333 assert(TM
.TargetABI
!= ARMBaseTargetMachine::ARM_ABI_UNKNOWN
);
334 return TM
.TargetABI
== ARMBaseTargetMachine::ARM_ABI_AAPCS
||
335 TM
.TargetABI
== ARMBaseTargetMachine::ARM_ABI_AAPCS16
;
337 bool ARMSubtarget::isAAPCS16_ABI() const {
338 assert(TM
.TargetABI
!= ARMBaseTargetMachine::ARM_ABI_UNKNOWN
);
339 return TM
.TargetABI
== ARMBaseTargetMachine::ARM_ABI_AAPCS16
;
342 bool ARMSubtarget::isROPI() const {
343 return TM
.getRelocationModel() == Reloc::ROPI
||
344 TM
.getRelocationModel() == Reloc::ROPI_RWPI
;
346 bool ARMSubtarget::isRWPI() const {
347 return TM
.getRelocationModel() == Reloc::RWPI
||
348 TM
.getRelocationModel() == Reloc::ROPI_RWPI
;
351 bool ARMSubtarget::isGVIndirectSymbol(const GlobalValue
*GV
) const {
352 if (!TM
.shouldAssumeDSOLocal(GV
))
355 // 32 bit macho has no relocation for a-b if a is undefined, even if b is in
356 // the section that is being relocated. This means we have to use o load even
357 // for GVs that are known to be local to the dso.
358 if (isTargetMachO() && TM
.isPositionIndependent() &&
359 (GV
->isDeclarationForLinker() || GV
->hasCommonLinkage()))
365 bool ARMSubtarget::isGVInGOT(const GlobalValue
*GV
) const {
366 return isTargetELF() && TM
.isPositionIndependent() && !GV
->isDSOLocal();
369 unsigned ARMSubtarget::getMispredictionPenalty() const {
370 return SchedModel
.MispredictPenalty
;
373 bool ARMSubtarget::enableMachineScheduler() const {
374 // The MachineScheduler can increase register usage, so we use more high
375 // registers and end up with more T2 instructions that cannot be converted to
376 // T1 instructions. At least until we do better at converting to thumb1
377 // instructions, on cortex-m at Oz where we are size-paranoid, don't use the
378 // Machine scheduler, relying on the DAG register pressure scheduler instead.
379 if (isMClass() && hasMinSize())
381 // Enable the MachineScheduler before register allocation for subtargets
382 // with the use-misched feature.
383 return useMachineScheduler();
386 bool ARMSubtarget::enableSubRegLiveness() const {
387 // Enable SubRegLiveness for MVE to better optimize s subregs for mqpr regs
388 // and q subregs for qqqqpr regs.
389 return hasMVEIntegerOps();
392 bool ARMSubtarget::enableMachinePipeliner() const {
393 // Enable the MachinePipeliner before register allocation for subtargets
394 // with the use-mipipeliner feature.
395 return getSchedModel().hasInstrSchedModel() && useMachinePipeliner();
398 bool ARMSubtarget::useDFAforSMS() const { return false; }
400 // This overrides the PostRAScheduler bit in the SchedModel for any CPU.
401 bool ARMSubtarget::enablePostRAScheduler() const {
402 if (enableMachineScheduler())
404 if (disablePostRAScheduler())
406 // Thumb1 cores will generally not benefit from post-ra scheduling
407 return !isThumb1Only();
410 bool ARMSubtarget::enablePostRAMachineScheduler() const {
411 if (!enableMachineScheduler())
413 if (disablePostRAScheduler())
415 return !isThumb1Only();
418 bool ARMSubtarget::useStride4VFPs() const {
419 // For general targets, the prologue can grow when VFPs are allocated with
420 // stride 4 (more vpush instructions). But WatchOS uses a compact unwind
421 // format which it's more important to get right.
422 return isTargetWatchABI() ||
423 (useWideStrideVFP() && !OptMinSize
);
426 bool ARMSubtarget::useMovt() const {
427 // NOTE Windows on ARM needs to use mov.w/mov.t pairs to materialise 32-bit
428 // immediates as it is inherently position independent, and may be out of
430 return !NoMovt
&& hasV8MBaselineOps() &&
431 (isTargetWindows() || !OptMinSize
|| genExecuteOnly());
434 bool ARMSubtarget::useFastISel() const {
435 // Enable fast-isel for any target, for testing only.
439 // Limit fast-isel to the targets that are or have been tested.
443 // Thumb2 support on iOS; ARM support on iOS, Linux and NaCl.
444 return TM
.Options
.EnableFastISel
&&
445 ((isTargetMachO() && !isThumb1Only()) ||
446 (isTargetLinux() && !isThumb()) || (isTargetNaCl() && !isThumb()));
449 unsigned ARMSubtarget::getGPRAllocationOrder(const MachineFunction
&MF
) const {
450 // The GPR register class has multiple possible allocation orders, with
451 // tradeoffs preferred by different sub-architectures and optimisation goals.
452 // The allocation orders are:
453 // 0: (the default tablegen order, not used)
456 // 3: r0-r7, r12, lr, r8-r11
457 // Note that the register allocator will change this order so that
458 // callee-saved registers are used later, as they require extra work in the
459 // prologue/epilogue (though we sometimes override that).
461 // For thumb1-only targets, only the low registers are allocatable.
465 // Allocate low registers first, so we can select more 16-bit instructions.
466 // We also (in ignoreCSRForAllocationOrder) override the default behaviour
467 // with regards to callee-saved registers, because pushing extra registers is
468 // much cheaper (in terms of code size) than using high registers. After
469 // that, we allocate r12 (doesn't need to be saved), lr (saving it means we
470 // can return with the pop, don't need an extra "bx lr") and then the rest of
471 // the high registers.
472 if (isThumb2() && MF
.getFunction().hasMinSize())
475 // Otherwise, allocate in the default order, using LR first because saving it
476 // allows a shorter epilogue sequence.
480 bool ARMSubtarget::ignoreCSRForAllocationOrder(const MachineFunction
&MF
,
481 MCRegister PhysReg
) const {
482 // To minimize code size in Thumb2, we prefer the usage of low regs (lower
483 // cost per use) so we can use narrow encoding. By default, caller-saved
484 // registers (e.g. lr, r12) are always allocated first, regardless of
485 // their cost per use. When optForMinSize, we prefer the low regs even if
486 // they are CSR because usually push/pop can be folded into existing ones.
487 return isThumb2() && MF
.getFunction().hasMinSize() &&
488 ARM::GPRRegClass
.contains(PhysReg
);
491 ARMSubtarget::PushPopSplitVariation
492 ARMSubtarget::getPushPopSplitVariation(const MachineFunction
&MF
) const {
// NOTE(review): this function is cut off at the end of the visible chunk (the
// final fall-through return is not shown) and extraction appears to have
// dropped the `return SplitR7;` statements for the Thumb1 and R7-frame-pointer
// cases. Verify against the upstream file before editing.
493 const Function
&F
= MF
.getFunction();
494 const MachineFrameInfo
&MFI
= MF
.getFrameInfo();
495 const std::vector
<CalleeSavedInfo
> CSI
=
496 MF
.getFrameInfo().getCalleeSavedInfo();
498 // Thumb1 always splits the pushes at R7, because the Thumb1 push instruction
499 // cannot use high registers except for lr.
503 // If R7 is the frame pointer, we must split at R7 to ensure that the
504 // previous frame pointer (R7) and return address (LR) are adjacent on the
505 // stack, to form a valid frame record.
506 if (getFramePointerReg() == ARM::R7
&&
507 MF
.getTarget().Options
.FramePointerIsReserved(MF
))
510 // Returns SplitR11WindowsSEH when the stack pointer needs to be
511 // restored from the frame pointer r11 + an offset and Windows CFI is enabled.
512 // This stack unwinding cannot be expressed with SEH unwind opcodes when done
513 // with a single push, making it necessary to split the push into r4-r10, and
514 // another containing r11+lr.
515 if (MF
.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
516 F
.needsUnwindTableEntry() &&
517 (MFI
.hasVarSizedObjects() || getRegisterInfo()->hasStackRealignment(MF
)))
518 return SplitR11WindowsSEH
;
520 // Returns SplitR11AAPCSSignRA when the frame pointer is R11, requiring R11
521 // and LR to be adjacent on the stack, and branch signing is enabled,
522 // requiring R12 to be on the stack.
523 if (MF
.getInfo
<ARMFunctionInfo
>()->shouldSignReturnAddress() &&
524 getFramePointerReg() == ARM::R11
&&
525 MF
.getTarget().Options
.FramePointerIsReserved(MF
))
526 return SplitR11AAPCSSignRA
;