//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));
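
// Gates the Falkor hardware prefetcher fix passes: FalkorMarkStridedAccesses
// (added in addIRPasses) and FalkorHWPFFix (added in addPreSched2).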
static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
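  // Initialize the AArch64-specific passes in the pass registry so they can
  // be referenced by name (for example via -stop-after/-run-pass/-print-*).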
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
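// (In these strings, "e"/"E" selects little/big endian, "m:<c>" the symbol
// mangling scheme (e = ELF, o = Mach-O, w = Windows COFF), "p:32:32" 32-bit
// pointers, "iN:A" integer size and alignment, "nN:M" native integer widths,
// and "S128" a 128-bit aligned stack.)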
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (Options.getABIName() == "ilp32")
    return "e-m:e-p:32:32-i8:8-i16:16-i64:64-S128";
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128";
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      if (!TT.isOSFuchsia())
        report_fatal_error(
            "Only small, tiny and large code models are allowed on AArch64");
      else if (*CM != CodeModel::Kernel)
        report_fatal_error("Only small, tiny, kernel, and large code models "
                           "are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  if (JIT)
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, CPU, FS, Options, getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (getOptLevel() <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
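    // With the abort mode disabled, functions that GlobalISel cannot select
    // fall back to SelectionDAG instead of producing a fatal error.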
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;
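
  // Subtargets are cached by the concatenated CPU and feature string, so
  // functions with identical attributes share a single AArch64Subtarget.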
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                           isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
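    // Cluster neighbouring loads and stores during scheduling so they stay
    // adjacent, which helps the later load/store optimizer form LDP/STP pairs.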
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
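  // The extra arguments to createCFGSimplificationPass request switch-condition
  // forwarding, switch-to-lookup-table conversion and common-instruction
  // sinking, with a bonus-instruction threshold of 1 and no requirement to
  // preserve canonical loop form.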
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(1, true, true, false, true));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  addPass(createAArch64StackTaggingPass(/* MergeInit = */ TM->getOptLevel() !=
                                            CodeGenOpt::None));
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAArch64PreLegalizeCombiner(IsOptNone));
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}