//===-- ARMTargetMachine.cpp - Define TargetMachine for ARM ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "ARMTargetMachine.h"
#include "ARM.h"
#include "ARMMacroFusion.h"
#include "ARMSubtarget.h"
#include "ARMTargetObjectFile.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "TargetInfo/ARMTargetInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/ExecutionDomainFix.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <memory>
#include <string>

using namespace llvm;
static cl::opt<bool>
    DisableA15SDOptimization("disable-a15-sd-optimization", cl::Hidden,
                             cl::desc("Inhibit optimization of S->D register accesses on A15"),
                             cl::init(false));

static cl::opt<bool>
    EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                     cl::desc("Run SimplifyCFG after expanding atomic operations"
                              " to make use of cmpxchg flow-based information"),
                     cl::init(true));

static cl::opt<bool>
    EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
                          cl::desc("Enable ARM load/store optimization pass"),
                          cl::init(true));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("arm-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));
namespace llvm {
void initializeARMExecutionDomainFixPass(PassRegistry &);
} // end namespace llvm
81 extern "C" void LLVMInitializeARMTarget() {
82 // Register the target.
83 RegisterTargetMachine
<ARMLETargetMachine
> X(getTheARMLETarget());
84 RegisterTargetMachine
<ARMLETargetMachine
> A(getTheThumbLETarget());
85 RegisterTargetMachine
<ARMBETargetMachine
> Y(getTheARMBETarget());
86 RegisterTargetMachine
<ARMBETargetMachine
> B(getTheThumbBETarget());
88 PassRegistry
&Registry
= *PassRegistry::getPassRegistry();
89 initializeGlobalISel(Registry
);
90 initializeARMLoadStoreOptPass(Registry
);
91 initializeARMPreAllocLoadStoreOptPass(Registry
);
92 initializeARMParallelDSPPass(Registry
);
93 initializeARMCodeGenPreparePass(Registry
);
94 initializeARMConstantIslandsPass(Registry
);
95 initializeARMExecutionDomainFixPass(Registry
);
96 initializeARMExpandPseudoPass(Registry
);
97 initializeThumb2SizeReducePass(Registry
);
98 initializeMVEVPTBlockPass(Registry
);
99 initializeARMLowOverheadLoopsPass(Registry
);
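
// Pick the TargetLoweringObjectFile implementation that matches the object
// file format of the triple: Mach-O for Darwin, COFF for Windows, and the
// ARM ELF variant everywhere else.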
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<TargetLoweringObjectFileMachO>();
  if (TT.isOSWindows())
    return std::make_unique<TargetLoweringObjectFileCOFF>();
  return std::make_unique<ARMElfTargetObjectFile>();
}
static ARMBaseTargetMachine::ARMABI
computeTargetABI(const Triple &TT, StringRef CPU,
                 const TargetOptions &Options) {
  StringRef ABIName = Options.MCOptions.getABIName();

  if (ABIName.empty())
    ABIName = ARM::computeDefaultTargetABI(TT, CPU);

  if (ABIName == "aapcs16")
    return ARMBaseTargetMachine::ARM_ABI_AAPCS16;
  else if (ABIName.startswith("aapcs"))
    return ARMBaseTargetMachine::ARM_ABI_AAPCS;
  else if (ABIName.startswith("apcs"))
    return ARMBaseTargetMachine::ARM_ABI_APCS;

  llvm_unreachable("Unhandled/unknown ABI Name!");
  return ARMBaseTargetMachine::ARM_ABI_UNKNOWN;
}
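
// Build the LLVM data layout string piece by piece. Endianness and the ABI
// computed above determine the alignment of integers, floats, vectors,
// aggregates and the stack.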
static std::string computeDataLayout(const Triple &TT, StringRef CPU,
                                     const TargetOptions &Options,
                                     bool isLittle) {
  auto ABI = computeTargetABI(TT, CPU, Options);
  std::string Ret;

  if (isLittle)
    // Little endian.
    Ret += "e";
  else
    // Big endian.
    Ret += "E";

  Ret += DataLayout::getManglingComponent(TT);

  // Pointers are 32 bits and aligned to 32 bits.
  Ret += "-p:32:32";

  // Function pointers are aligned to 8 bits (because the LSB stores the
  // ARM/Thumb state).
  Ret += "-Fi8";

  // ABIs other than APCS have 64 bit integers with natural alignment.
  if (ABI != ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-i64:64";

  // We have 64 bit floats. The APCS ABI requires them to be aligned to 32
  // bits, others to 64 bits. We always try to align to 64 bits.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-f64:32:64";

  // We have 128 and 64 bit vectors. The APCS ABI aligns them to 32 bits, others
  // to 64. We always try to give them natural alignment.
  if (ABI == ARMBaseTargetMachine::ARM_ABI_APCS)
    Ret += "-v64:32:64-v128:32:128";
  else if (ABI != ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-v128:64:128";

  // Try to align aggregates to 32 bits (the default is 64 bits, which has no
  // particular hardware support on 32-bit ARM).
  Ret += "-a:0:32";

  // Integer registers are 32 bits.
  Ret += "-n32";

  // The stack is 128 bit aligned on NaCl, 64 bit aligned on AAPCS and 32 bit
  // aligned everywhere else.
  if (TT.isOSNaCl() || ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS16)
    Ret += "-S128";
  else if (ABI == ARMBaseTargetMachine::ARM_ABI_AAPCS)
    Ret += "-S64";
  else
    Ret += "-S32";

  return Ret;
}
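
// Resolve an optional user-supplied relocation model to one the backend can
// honour for this triple.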
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    // Default relocation model on Darwin is PIC.
    return TT.isOSBinFormatMachO() ? Reloc::PIC_ : Reloc::Static;

  if (*RM == Reloc::ROPI || *RM == Reloc::RWPI || *RM == Reloc::ROPI_RWPI)
    assert(TT.isOSBinFormatELF() &&
           "ROPI/RWPI currently only supported for ELF");

  // DynamicNoPIC is only used on darwin.
  if (*RM == Reloc::DynamicNoPIC && !TT.isOSDarwin())
    return Reloc::Static;

  return *RM;
}
/// Create an ARM architecture model.
///
ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool isLittle)
    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
                        CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OL),
      TargetABI(computeTargetABI(TT, CPU, Options)),
      TLOF(createTLOF(getTargetTriple())), isLittle(isLittle) {

  // Default to triple-appropriate float ABI
  if (Options.FloatABIType == FloatABI::Default) {
    if (isTargetHardFloat())
      this->Options.FloatABIType = FloatABI::Hard;
    else
      this->Options.FloatABIType = FloatABI::Soft;
  }

  // Default to triple-appropriate EABI
  if (Options.EABIVersion == EABI::Default ||
      Options.EABIVersion == EABI::Unknown) {
    // musl is compatible with glibc with regard to EABI version
    if ((TargetTriple.getEnvironment() == Triple::GNUEABI ||
         TargetTriple.getEnvironment() == Triple::GNUEABIHF ||
         TargetTriple.getEnvironment() == Triple::MuslEABI ||
         TargetTriple.getEnvironment() == Triple::MuslEABIHF) &&
        !(TargetTriple.isOSWindows() || TargetTriple.isOSDarwin()))
      this->Options.EABIVersion = EABI::GNU;
    else
      this->Options.EABIVersion = EABI::EABI5;
  }

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }
}
ARMBaseTargetMachine::~ARMBaseTargetMachine() = default;

const ARMSubtarget *
ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;

  // FIXME: This is related to the code below to reset the target options,
  // we need to know whether or not the soft float flag is set on the
  // function before we can generate a subtarget. We also need to use
  // it as a key for the subtarget since that can be the only difference
  // between two functions.
  bool SoftFloat =
      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
  // If the soft float attribute is set on the function turn on the soft float
  // subtarget feature.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";

  // Use the optminsize to identify the subtarget, but don't use it in the
  // feature string.
  std::string Key = CPU + FS;
  if (F.hasMinSize())
    Key += "+minsize";

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<ARMSubtarget>(TargetTriple, CPU, FS, *this, isLittle,
                                       F.hasMinSize());

    if (!I->isThumb() && !I->hasARMOps())
      F.getContext().emitError("Function '" + F.getName() + "' uses ARM "
          "instructions, but the target does not support ARM mode execution.");
  }

  return I.get();
}
TargetTransformInfo
ARMBaseTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(ARMTTIImpl(this, F));
}
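
// The little-endian and big-endian target machines differ only in the
// isLittle flag they hand to ARMBaseTargetMachine.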
ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                       StringRef CPU, StringRef FS,
                                       const TargetOptions &Options,
                                       Optional<Reloc::Model> RM,
                                       Optional<CodeModel::Model> CM,
                                       CodeGenOpt::Level OL, bool JIT)
    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}
namespace {

/// ARM Code Generator Pass Configuration Options.
class ARMPassConfig : public TargetPassConfig {
public:
  ARMPassConfig(ARMBaseTargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None) {
      ARMGenSubtargetInfo STI(TM.getTargetTriple(), TM.getTargetCPU(),
                              TM.getTargetFeatureString());
      if (STI.hasFeature(ARM::FeatureUseMISched))
        substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    }
  }

  ARMBaseTargetMachine &getARMTargetMachine() const {
    return getTM<ARMBaseTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    // add DAG Mutations here.
    const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
    if (ST.hasFusion())
      DAG->addMutation(createARMMacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
    // add DAG Mutations here.
    const ARMSubtarget &ST = C->MF->getSubtarget<ARMSubtarget>();
    if (ST.hasFusion())
      DAG->addMutation(createARMMacroFusionDAGMutation());
    return DAG;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};
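
// ExecutionDomainFix specialised to ARM's DPR register class.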
class ARMExecutionDomainFix : public ExecutionDomainFix {
public:
  static char ID;
  ARMExecutionDomainFix() : ExecutionDomainFix(ID, ARM::DPRRegClass) {}

  StringRef getPassName() const override {
    return "ARM Execution Domain Fix";
  }
};

char ARMExecutionDomainFix::ID;

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(ARMExecutionDomainFix, "arm-execution-domain-fix",
                      "ARM Execution Domain Fix", false, false)
INITIALIZE_PASS_DEPENDENCY(ReachingDefAnalysis)
INITIALIZE_PASS_END(ARMExecutionDomainFix, "arm-execution-domain-fix",
                    "ARM Execution Domain Fix", false, false)
TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new ARMPassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> ARMPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}
void ARMPassConfig::addIRPasses() {
  if (TM->Options.ThreadModel == ThreadModel::Single)
    addPass(createLowerAtomicPass());
  else
    addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(
        1, false, false, true, true, [this](const Function &F) {
          const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
          return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
        }));

  TargetPassConfig::addIRPasses();

  // Run the parallel DSP pass.
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createARMParallelDSPPass());

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass());
}
void ARMPassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMCodeGenPreparePass());
  TargetPassConfig::addCodeGenPrepare();
}
bool ARMPassConfig::addPreISel() {
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    // FIXME: This is using the thumb1 only constant value for
    // maximal global offset for merging globals. We may want
    // to look into using the old value for non-thumb1 code of
    // 4095 based on the TargetMachine, but this starts to become
    // tricky when doing code gen per function.
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createHardwareLoopsPass());

  return false;
}
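
// Instruction selection: the SelectionDAG ISel pass plus the GlobalISel
// pipeline hooks (IRTranslator, Legalizer, RegBankSelect, InstructionSelect).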
bool ARMPassConfig::addInstSelector() {
  addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
  return false;
}

bool ARMPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool ARMPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool ARMPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool ARMPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}
void ARMPassConfig::addPreRegAlloc() {
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(createMLxExpansionPass());

    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));

    if (!DisableA15SDOptimization)
      addPass(createA15SDOptimizerPass());
  }
}
void ARMPassConfig::addPreSched2() {
  if (getOptLevel() != CodeGenOpt::None) {
    if (EnableARMLoadStoreOpt)
      addPass(createARMLoadStoreOptimizationPass());

    addPass(new ARMExecutionDomainFix());
    addPass(createBreakFalseDeps());
  }

  // Expand some pseudo instructions into multiple instructions to allow
  // proper scheduling.
  addPass(createARMExpandPseudoPass());

  if (getOptLevel() != CodeGenOpt::None) {
    // in v8, IfConversion depends on Thumb instruction widths
    addPass(createThumb2SizeReductionPass([this](const Function &F) {
      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
    }));

    addPass(createIfConverter([](const MachineFunction &MF) {
      return !MF.getSubtarget<ARMSubtarget>().isThumb1Only();
    }));
  }
  addPass(createMVEVPTBlockPass());
  addPass(createThumb2ITBlockPass());
}
void ARMPassConfig::addPreEmitPass() {
  addPass(createThumb2SizeReductionPass());

  // The constant island pass works on unbundled instructions.
  addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
    return MF.getSubtarget<ARMSubtarget>().isThumb2();
  }));

  // Don't optimize barriers at -O0.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createARMOptimizeBarriersPass());

  addPass(createARMConstantIslandPass());
  addPass(createARMLowOverheadLoopsPass());
}