//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
12 #include "AArch64TargetMachine.h"
13 #include "AArch64.h"
14 #include "AArch64LoopIdiomTransform.h"
15 #include "AArch64MachineFunctionInfo.h"
16 #include "AArch64MachineScheduler.h"
17 #include "AArch64MacroFusion.h"
18 #include "AArch64Subtarget.h"
19 #include "AArch64TargetObjectFile.h"
20 #include "AArch64TargetTransformInfo.h"
21 #include "MCTargetDesc/AArch64MCTargetDesc.h"
22 #include "TargetInfo/AArch64TargetInfo.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/Analysis/TargetTransformInfo.h"
25 #include "llvm/Analysis/ValueTracking.h"
26 #include "llvm/CodeGen/CFIFixup.h"
27 #include "llvm/CodeGen/CSEConfigBase.h"
28 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
29 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
30 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
31 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
32 #include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
33 #include "llvm/CodeGen/GlobalISel/Localizer.h"
34 #include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
35 #include "llvm/CodeGen/MIRParser/MIParser.h"
36 #include "llvm/CodeGen/MachineScheduler.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/TargetInstrInfo.h"
39 #include "llvm/CodeGen/TargetPassConfig.h"
40 #include "llvm/IR/Attributes.h"
41 #include "llvm/IR/Function.h"
42 #include "llvm/InitializePasses.h"
43 #include "llvm/MC/MCAsmInfo.h"
44 #include "llvm/MC/MCTargetOptions.h"
45 #include "llvm/MC/TargetRegistry.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Passes/PassBuilder.h"
48 #include "llvm/Support/CodeGen.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Target/TargetLoweringObjectFile.h"
51 #include "llvm/Target/TargetOptions.h"
52 #include "llvm/TargetParser/Triple.h"
53 #include "llvm/Transforms/CFGuard.h"
54 #include "llvm/Transforms/Scalar.h"
55 #include <memory>
56 #include <optional>
57 #include <string>
using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAArch64CopyPropagation(
    "aarch64-enable-copy-propagation",
    cl::desc("Enable the copy propagation with AArch64 copy instr"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores to"
                                           " them with stores to the zero"
                                           " register"),
                                  cl::init(true));
static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    EnableSelectOpt("aarch64-select-opt", cl::Hidden,
                    cl::desc("Enable select to branch optimizations"),
                    cl::init(true));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));
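
// With the default of 0, GlobalISel is used only at -O0 (opt levels are
// non-negative, so the documented -1 disables it at every level).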
static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));
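
// Gates the Falkor hardware prefetcher fix: both FalkorMarkStridedAccesses
// (added in addIRPasses) and FalkorHWPFFix (added in addPreSched2).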
static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));
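
// The two SVE vector-bits options below are only consulted when a function
// carries no vscale_range attribute; see getSubtargetImpl().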
static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);
extern cl::opt<bool> EnableHomogeneousPrologEpilog;

static cl::opt<bool> EnableGISelLoadStoreOptPreLegal(
    "aarch64-enable-gisel-ldst-prelegal",
    cl::desc("Enable GlobalISel's pre-legalizer load/store optimization pass"),
    cl::init(true), cl::Hidden);

static cl::opt<bool> EnableGISelLoadStoreOptPostLegal(
    "aarch64-enable-gisel-ldst-postlegal",
    cl::desc("Enable GlobalISel's post-legalizer load/store optimization pass"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnableSinkFold("aarch64-enable-sink-fold",
                   cl::desc("Enable sinking and folding of instruction copies"),
                   cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableMachinePipeliner("aarch64-enable-pipeliner",
                           cl::desc("Enable Machine Pipeliner for AArch64"),
                           cl::init(false), cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
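  // The "arm64" spelling and the ILP32 variants all reuse the little-endian
  // target machine.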
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64LoopIdiomTransformLegacyPassPass(*PR);
  initializeAArch64MIPeepholeOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64O0PreLegalizerCombinerPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PointerAuthPass(*PR);
  initializeAArch64PostCoalescerPass(*PR);
  initializeAArch64PostLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerLoweringPass(*PR);
  initializeAArch64PostSelectOptimizePass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeKCFIPass(*PR);
  initializeSMEABIPass(*PR);
  initializeSVEIntrinsicOptsPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64SLSHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
  initializeAArch64LowerHomogeneousPrologEpilogPass(*PR);
  initializeAArch64DAGToDAGISelPass(*PR);
  initializeAArch64GlobalsTaggingPass(*PR);
}
//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();
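  // Everything else (e.g. Linux) uses ELF.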
  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
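  // DataLayout legend: "e"/"E" = little/big endian; "m:o"/"m:e"/"m:w" =
  // Mach-O/ELF/Windows-COFF mangling; "pN:A" = pointer size/alignment;
  // "iN:A" = integer alignment; "nN:M" = native integer widths; "S128" =
  // 128-bit natural stack alignment.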
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128-Fn32";
    return "e-m:o-i64:64-i128:128-n32:64-S128-Fn32";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128-Fn32";
  std::string Endian = LittleEndian ? "e" : "E";
  std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
  return Endian + "-m:e" + Ptr32 +
         "-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32";
}

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}
static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           std::optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}
static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT,
                             std::optional<CodeModel::Model> CM, bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  // We should set CodeModel::Small for Windows ARM64 in JIT mode, since the
  // large code model makes LLVM generate 4-MOV sequences, and Windows doesn't
  // support relocating such long-branch sequences.
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}
/// Create an AArch64 architecture model.

AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           std::optional<Reloc::Model> RM,
                                           std::optional<CodeModel::Model> CM,
                                           CodeGenOptLevel OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, computeDefaultCPU(TT, CPU), FS, Options,
                        getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.

    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }
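
  // TLSSize is the bit-width of addressable TLS offsets: 24 bits covers
  // 16MiB, 32 bits covers 4GiB (see the code-model comments below).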
  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (static_cast<int>(getOptLevel()) <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
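    // Fall back to SelectionDAG silently when GlobalISel fails to select,
    // rather than aborting.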
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);

  // AArch64 supports fixing up the DWARF unwind information.
  if (!getMCAsmInfo()->usesWindowsCFI())
    setCFIFixup(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute TuneAttr = F.getFnAttribute("tune-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  StringRef CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString() : TargetCPU;
  StringRef TuneCPU = TuneAttr.isValid() ? TuneAttr.getValueAsString() : CPU;
  StringRef FS = FSAttr.isValid() ? FSAttr.getValueAsString() : TargetFS;
  bool HasMinSize = F.hasMinSize();

  bool StreamingSVEMode = F.hasFnAttribute("aarch64_pstate_sm_enabled") ||
                          F.hasFnAttribute("aarch64_pstate_sm_body");
  bool StreamingCompatibleSVEMode =
      F.hasFnAttribute("aarch64_pstate_sm_compatible");

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  if (F.hasFnAttribute(Attribute::VScaleRange)) {
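    // vscale_range counts 128-bit SVE granules, so scale it up to bits.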
    ConstantRange CR = getVScaleRange(&F, 64);
    MinSVEVectorSize = CR.getUnsignedMin().getZExtValue() * 128;
    MaxSVEVectorSize = CR.getUnsignedMax().getZExtValue() * 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case of no asserts
  if (MaxSVEVectorSize != 0) {
    MinSVEVectorSize = std::min(MinSVEVectorSize, MaxSVEVectorSize);
    MaxSVEVectorSize = std::max(MinSVEVectorSize, MaxSVEVectorSize);
  }
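
  // The cache key must encode every input that can affect subtarget
  // construction, so functions with different attributes get distinct
  // subtargets.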
  SmallString<512> Key;
  raw_svector_ostream(Key) << "SVEMin" << MinSVEVectorSize << "SVEMax"
                           << MaxSVEVectorSize
                           << "StreamingSVEMode=" << StreamingSVEMode
                           << "StreamingCompatibleSVEMode="
                           << StreamingCompatibleSVEMode << CPU << TuneCPU << FS
                           << "HasMinSize=" << HasMinSize;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(
        TargetTriple, CPU, TuneCPU, FS, *this, isLittle, MinSVEVectorSize,
        MaxSVEVectorSize, StreamingSVEMode, StreamingCompatibleSVEMode,
        HasMinSize);
  }

  assert((!StreamingSVEMode || I->hasSME()) &&
         "Expected SME to be available");

  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, std::optional<Reloc::Model> RM,
    std::optional<CodeModel::Model> CM, CodeGenOptLevel OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOptLevel::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
    setEnableSinkAndFold(EnableSinkFold);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMI *DAG =
        new ScheduleDAGMI(C, std::make_unique<AArch64PostRASchedStrategy>(C),
                          /* RemoveKillFlags=*/true);
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (v. addPreSched2()).
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

    return DAG;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPostBBSections() override;
  void addPreEmitPass2() override;
  bool addRegAssignAndRewriteOptimized() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

void AArch64TargetMachine::registerPassBuilderCallbacks(
    PassBuilder &PB, bool PopulateClassToPassNames) {

#define GET_PASS_REGISTRY "AArch64PassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerLateLoopOptimizationsEPCallback(
      [=](LoopPassManager &LPM, OptimizationLevel Level) {
        LPM.addPass(AArch64LoopIdiomTransformPass());
      });
}

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandLegacyPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts &&
      TM->getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchRangeToICmp(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  if (EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  if (getOptLevel() == CodeGenOptLevel::Aggressive && EnableSelectOpt)
    addPass(createSelectOptimizePass());

  addPass(createAArch64GlobalsTaggingPass());
  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOptLevel::None));

  // Match complex arithmetic patterns
  if (TM->getOptLevel() >= CodeGenOptLevel::Default)
    addPass(createComplexDeinterleavingPass(TM));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  // Expand any functions marked with SME attributes which require special
  // changes for the calling convention or that require the lazy-saving
  // mechanism specified in the SME ABI.
  addPass(createSMEABIPass());

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows()) {
    if (TM->getTargetTriple().isWindowsArm64EC())
      addPass(createAArch64Arm64ECCallLoweringPass());
    else
      addPass(createCFGuardCheckPass());
  }

  if (TM->Options.JMCInstrument)
    addPass(createJMCInstrumenterPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOptLevel::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize =
        (TM->getOptLevel() < CodeGenOptLevel::Aggressive) &&
        (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;
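
    // 4095 is the largest unscaled immediate offset reachable from a merged
    // global base (see the FIXME above regarding type-size scaling).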
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createTypePromotionLegacyPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOptLevel::None) {
    addPass(createAArch64O0PreLegalizerCombiner());
    addPass(new Localizer());
  } else {
    addPass(createAArch64PreLegalizerCombiner());
    addPass(new Localizer());
    if (EnableGISelLoadStoreOptPreLegal)
      addPass(new LoadStoreOpt());
  }
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  if (!IsOptNone) {
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
    if (EnableGISelLoadStoreOptPostLegal)
      addPass(new LoadStoreOpt());
  }
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

void AArch64PassConfig::addMachineSSAOptimization() {
  // Run default MachineSSAOptimization first.
  TargetPassConfig::addMachineSSAOptimization();

  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64MIPeepholeOptPass());
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableMachinePipeliner)
    addPass(&MachinePipelinerID);
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOptLevel::None &&
      EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // Emit KCFI checks for indirect calls.
  addPass(createKCFIPass());

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (TM->getOptLevel() >= CodeGenOptLevel::Aggressive &&
      EnableAArch64CopyPropagation)
    addPass(createMachineCopyPropagationPass(true));

  addPass(createAArch64A53Fix835769());

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPostBBSections() {
  addPass(createAArch64IndirectThunks());
  addPass(createAArch64SLSHardeningPass());
  addPass(createAArch64PointerAuthPass());
  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getOptLevel() != CodeGenOptLevel::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
  // instructions are lowered to bundles as well.
  addPass(createUnpackMachineBundles(nullptr));
}

bool AArch64PassConfig::addRegAssignAndRewriteOptimized() {
  addPass(createAArch64PostCoalescerPass());
  return TargetPassConfig::addRegAssignAndRewriteOptimized();
}

MachineFunctionInfo *AArch64TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return AArch64FunctionInfo::create<AArch64FunctionInfo>(
      Allocator, F, static_cast<const AArch64Subtarget *>(STI));
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI = static_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}