//===- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//
#include "AArch64ExpandImm.h"
#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;

#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
namespace {

class AArch64ExpandPseudo : public MachineFunctionPass {
public:
  const AArch64InstrInfo *TII;

  static char ID;

  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
    initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                    unsigned BitSize);

  bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                      unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
                      unsigned ExtendImm, unsigned ZeroReg,
                      MachineBasicBlock::iterator &NextMBBI);
  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineBasicBlock::iterator &NextMBBI);
  bool expandSetTagLoop(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator MBBI,
                        MachineBasicBlock::iterator &NextMBBI);
};

} // end anonymous namespace

char AArch64ExpandPseudo::ID = 0;

INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
                AARCH64_EXPAND_PSEUDO_NAME, false, false)
/// Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
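/// Callers pass the first and last instructions of the expansion as (UseMI,
/// DefMI), so implicit uses land on the first expanded instruction and
/// implicit defs on the last.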
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
                           MachineInstrBuilder &DefMI) {
  const MCInstrDesc &Desc = OldMI.getDesc();
  for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = OldMI.getOperand(i);
    assert(MO.isReg() && MO.getReg());
    if (MO.isUse())
      UseMI.add(MO);
    else
      DefMI.add(MO);
  }
}
/// Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
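/// For example, with BitSize == 64 and Imm == 0x0000123400005678 the helper
/// typically produces (illustrative; the exact sequence is chosen by
/// AArch64_IMM::expandMOVImm):
///   movz xDst, #0x5678
///   movk xDst, #0x1234, lsl #32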
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned BitSize) {
  MachineInstr &MI = *MBBI;
  Register DstReg = MI.getOperand(0).getReg();
  uint64_t RenamableState =
      MI.getOperand(0).isRenamable() ? RegState::Renamable : 0;
  uint64_t Imm = MI.getOperand(1).getImm();

  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
    // Useless def, and we don't want to risk creating an invalid ORR (which
    // would really write to sp).
    MI.eraseFromParent();
    return true;
  }

  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Imm, BitSize, Insn);
  assert(Insn.size() != 0);
  SmallVector<MachineInstrBuilder, 4> MIBS;
  for (auto I = Insn.begin(), E = Insn.end(); I != E; ++I) {
    bool LastItem = std::next(I) == E;
    switch (I->Opcode) {
    default: llvm_unreachable("unhandled!"); break;
    case AArch64::ORRWri:
    case AArch64::ORRXri:
      MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
                         .add(MI.getOperand(0))
                         .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
                         .addImm(I->Op2));
      break;
    case AArch64::MOVNWi:
    case AArch64::MOVNXi:
    case AArch64::MOVZWi:
    case AArch64::MOVZXi: {
      bool DstIsDead = MI.getOperand(0).isDead();
      MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
                         .addReg(DstReg,
                                 RegState::Define |
                                     getDeadRegState(DstIsDead && LastItem) |
                                     RenamableState)
                         .addImm(I->Op1)
                         .addImm(I->Op2));
    } break;
    case AArch64::MOVKWi:
    case AArch64::MOVKXi: {
      Register DstReg = MI.getOperand(0).getReg();
      bool DstIsDead = MI.getOperand(0).isDead();
      MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
                         .addReg(DstReg,
                                 RegState::Define |
                                     getDeadRegState(DstIsDead && LastItem) |
                                     RenamableState)
                         .addReg(DstReg)
                         .addImm(I->Op1)
                         .addImm(I->Op2));
    } break;
    }
  }

  transferImpOps(MI, MIBS.front(), MIBS.back());
  MI.eraseFromParent();
  return true;
}
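
// The CMP_SWAP_* pseudos below expand into a compare-and-swap loop built from
// three new blocks spliced in after the current one: LoadCmpBB loads and
// compares, branching to DoneBB on mismatch; StoreBB issues the
// store-exclusive and loops back to LoadCmpBB if it fails; DoneBB receives
// the remainder of the original block.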
bool AArch64ExpandPseudo::expandCMP_SWAP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
    unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Dest = MI.getOperand(0);
  Register StatusReg = MI.getOperand(1).getReg();
  bool StatusDead = MI.getOperand(1).isDead();
  // Duplicating undef operands into 2 instructions does not guarantee the same
  // value on both; however, undef should be replaced by xzr anyway.
  assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
  Register AddrReg = MI.getOperand(2).getReg();
  Register DesiredReg = MI.getOperand(3).getReg();
  Register NewReg = MI.getOperand(4).getReg();

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     mov wStatus, 0
  //     ldaxr xDest, [xAddr]
  //     cmp xDest, xDesired
  //     b.ne .Ldone
  if (!StatusDead)
    BuildMI(LoadCmpBB, DL, TII->get(AArch64::MOVZWi), StatusReg)
        .addImm(0)
        .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
      .addReg(AddrReg);
  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
      .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
      .addReg(DesiredReg)
      .addImm(ExtendImm);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(DoneBB)
      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxr wStatus, xNew, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
      .addReg(NewReg)
      .addReg(AddrReg);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, getKillRegState(StatusDead))
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  // Recompute livein lists.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *DoneBB);
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
  // Do an extra pass around the loop to get the loop-carried registers right.
  StoreBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  LoadCmpBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);

  return true;
}
bool AArch64ExpandPseudo::expandCMP_SWAP_128(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &DestLo = MI.getOperand(0);
  MachineOperand &DestHi = MI.getOperand(1);
  Register StatusReg = MI.getOperand(2).getReg();
  bool StatusDead = MI.getOperand(2).isDead();
  // Duplicating undef operands into 2 instructions does not guarantee the same
  // value on both; however, undef should be replaced by xzr anyway.
  assert(!MI.getOperand(3).isUndef() && "cannot handle undef");
  Register AddrReg = MI.getOperand(3).getReg();
  Register DesiredLoReg = MI.getOperand(4).getReg();
  Register DesiredHiReg = MI.getOperand(5).getReg();
  Register NewLoReg = MI.getOperand(6).getReg();
  Register NewHiReg = MI.getOperand(7).getReg();

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);
  // .Lloadcmp:
  //     ldaxp xDestLo, xDestHi, [xAddr]
  //     cmp xDestLo, xDesiredLo
  //     csinc wStatus, wzr, wzr, eq
  //     cmp xDestHi, xDesiredHi
  //     csinc wStatus, wStatus, wStatus, eq
  //     cbnz wStatus, .Ldone
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
      .addReg(DestLo.getReg(), RegState::Define)
      .addReg(DestHi.getReg(), RegState::Define)
      .addReg(AddrReg);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
      .addReg(DesiredLoReg)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::EQ);
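  // CSINC Wd, Wn, Wm, cond yields Wn if cond holds and Wm + 1 otherwise, so
  // the csinc above leaves wStatus == 0 iff the low words compared equal; the
  // cmp/csinc pair below folds in the result of the high-word comparison.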
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
      .addReg(DesiredHiReg)
      .addImm(0);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(StatusReg, RegState::Kill)
      .addUse(StatusReg, RegState::Kill)
      .addImm(AArch64CC::EQ);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
      .addUse(StatusReg, getKillRegState(StatusDead))
      .addMBB(DoneBB);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxp wStatus, xNewLo, xNewHi, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
      .addReg(NewLoReg)
      .addReg(NewHiReg)
      .addReg(AddrReg);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, getKillRegState(StatusDead))
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  // Recompute liveness bottom up.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *DoneBB);
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
  // Do an extra pass in the loop to get the loop-carried dependencies right.
  StoreBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *StoreBB);
  LoadCmpBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *LoadCmpBB);

  return true;
}
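
// The STGloop_wback / STZGloop_wback pseudos are expanded into a tag-store
// loop of roughly the following shape (stg/st2g, or stzg/stz2g for the
// zeroing variant):
//
//     stg   xAddr, [xAddr], #16       // only for an odd number of granules
//     mov   xSize, #RemainingSize
//   .Lloop:
//     st2g  xAddr, [xAddr], #32
//     sub   xSize, xSize, #32
//     cbnz  xSize, .Lloop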
bool AArch64ExpandPseudo::expandSetTagLoop(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  Register SizeReg = MI.getOperand(0).getReg();
  Register AddressReg = MI.getOperand(1).getReg();

  MachineFunction *MF = MBB.getParent();

  bool ZeroData = MI.getOpcode() == AArch64::STZGloop_wback;
  const unsigned OpCode1 =
      ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
  const unsigned OpCode2 =
      ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;

  unsigned Size = MI.getOperand(2).getImm();
  assert(Size > 0 && Size % 16 == 0);
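  // If the size is an odd number of 16-byte granules, store one granule up
  // front so the loop below can always step by two granules at a time.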
  if (Size % (16 * 2) != 0) {
    BuildMI(MBB, MBBI, DL, TII->get(OpCode1), AddressReg)
        .addReg(AddressReg)
        .addReg(AddressReg)
        .addImm(1);
    Size -= 16;
  }
  MachineBasicBlock::iterator I =
      BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), SizeReg)
          .addImm(Size);
  expandMOVImm(MBB, I, 64);

  auto LoopBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoopBB);
  MF->insert(++LoopBB->getIterator(), DoneBB);

  BuildMI(LoopBB, DL, TII->get(OpCode2))
      .addDef(AddressReg)
      .addReg(AddressReg)
      .addReg(AddressReg)
      .addImm(2)
      .cloneMemRefs(MI)
      .setMIFlags(MI.getFlags());
  BuildMI(LoopBB, DL, TII->get(AArch64::SUBXri))
      .addDef(SizeReg)
      .addReg(SizeReg)
      .addImm(16 * 2)
      .addImm(0);
  BuildMI(LoopBB, DL, TII->get(AArch64::CBNZX)).addUse(SizeReg).addMBB(LoopBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);

  MBB.addSuccessor(LoopBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  // Recompute liveness bottom up.
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *DoneBB);
  computeAndAddLiveIns(LiveRegs, *LoopBB);
  // Do an extra pass in the loop to get the loop-carried dependencies right.
  // FIXME: is this necessary?
  LoopBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *LoopBB);
  DoneBB->clearLiveIns();
  computeAndAddLiveIns(LiveRegs, *DoneBB);

  return true;
}
/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;

  case AArch64::ADDWrr:
  case AArch64::SUBWrr:
  case AArch64::ADDXrr:
  case AArch64::SUBXrr:
  case AArch64::ADDSWrr:
  case AArch64::SUBSWrr:
  case AArch64::ADDSXrr:
  case AArch64::SUBSXrr:
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::ANDSWrr:
  case AArch64::ANDSXrr:
  case AArch64::BICSWrr:
  case AArch64::BICSXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr: {
    unsigned Opcode;
    switch (MI.getOpcode()) {
    default:
      return false;
    case AArch64::ADDWrr:  Opcode = AArch64::ADDWrs; break;
    case AArch64::SUBWrr:  Opcode = AArch64::SUBWrs; break;
    case AArch64::ADDXrr:  Opcode = AArch64::ADDXrs; break;
    case AArch64::SUBXrr:  Opcode = AArch64::SUBXrs; break;
    case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
    case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
    case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
    case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
    case AArch64::ANDWrr:  Opcode = AArch64::ANDWrs; break;
    case AArch64::ANDXrr:  Opcode = AArch64::ANDXrs; break;
    case AArch64::BICWrr:  Opcode = AArch64::BICWrs; break;
    case AArch64::BICXrr:  Opcode = AArch64::BICXrs; break;
    case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
    case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
    case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
    case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
    case AArch64::EONWrr:  Opcode = AArch64::EONWrs; break;
    case AArch64::EONXrr:  Opcode = AArch64::EONXrs; break;
    case AArch64::EORWrr:  Opcode = AArch64::EORWrs; break;
    case AArch64::EORXrr:  Opcode = AArch64::EORXrs; break;
    case AArch64::ORNWrr:  Opcode = AArch64::ORNWrs; break;
    case AArch64::ORNXrr:  Opcode = AArch64::ORNXrs; break;
    case AArch64::ORRWrr:  Opcode = AArch64::ORRWrs; break;
    case AArch64::ORRXrr:  Opcode = AArch64::ORRXrs; break;
    }
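    // Rewrite the register-register pseudo as the equivalent shifted-register
    // instruction with a shift amount of zero (LSL #0).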
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
                MI.getOperand(0).getReg())
            .add(MI.getOperand(1))
            .add(MI.getOperand(2))
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::LOADgot: {
    MachineFunction *MF = MBB.getParent();
    Register DstReg = MI.getOperand(0).getReg();
    const MachineOperand &MO1 = MI.getOperand(1);
    unsigned Flags = MO1.getTargetFlags();

    if (MF->getTarget().getCodeModel() == CodeModel::Tiny) {
      // Tiny code model: expand to a literal LDR.
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
                                        TII->get(AArch64::LDRXl), DstReg);

      if (MO1.isGlobal()) {
        MIB.addGlobalAddress(MO1.getGlobal(), 0, Flags);
      } else if (MO1.isSymbol()) {
        MIB.addExternalSymbol(MO1.getSymbolName(), Flags);
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
        MIB.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(), Flags);
      }
    } else {
      // Small code model: expand into ADRP + LDR.
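      // i.e., roughly:
      //     adrp xDst, :got:sym
      //     ldr  xDst, [xDst, :got_lo12:sym]
      // (the exact relocation operands are attached below).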
      MachineFunction &MF = *MI.getParent()->getParent();
      DebugLoc DL = MI.getDebugLoc();
      MachineInstrBuilder MIB1 =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);

      MachineInstrBuilder MIB2;
      if (MF.getSubtarget<AArch64Subtarget>().isTargetILP32()) {
        auto TRI = MBB.getParent()->getSubtarget().getRegisterInfo();
        unsigned Reg32 = TRI->getSubReg(DstReg, AArch64::sub_32);
        unsigned DstFlags = MI.getOperand(0).getTargetFlags();
        MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRWui))
                   .addDef(Reg32)
                   .addReg(DstReg, RegState::Kill)
                   .addReg(DstReg, DstFlags | RegState::Implicit);
      } else {
        unsigned DstReg = MI.getOperand(0).getReg();
        MIB2 = BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRXui))
                   .add(MI.getOperand(0))
                   .addUse(DstReg, RegState::Kill);
      }

      if (MO1.isGlobal()) {
        MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
        MIB2.addGlobalAddress(MO1.getGlobal(), 0,
                              Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
      } else if (MO1.isSymbol()) {
        MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
        MIB2.addExternalSymbol(MO1.getSymbolName(), Flags |
                                                        AArch64II::MO_PAGEOFF |
                                                        AArch64II::MO_NC);
      } else {
        assert(MO1.isCPI() &&
               "Only expect globals, externalsymbols, or constant pools");
        MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                  Flags | AArch64II::MO_PAGE);
        MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                  Flags | AArch64II::MO_PAGEOFF |
                                      AArch64II::MO_NC);
      }

      transferImpOps(MI, MIB1, MIB2);
    }
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVaddr:
  case AArch64::MOVaddrJT:
  case AArch64::MOVaddrCP:
  case AArch64::MOVaddrBA:
  case AArch64::MOVaddrTLS:
  case AArch64::MOVaddrEXT: {
    // Expand into ADRP + ADD.
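    // e.g., MOVaddr xDst, sym becomes (roughly):
    //     adrp xDst, sym
    //     add  xDst, xDst, :lo12:sym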
    Register DstReg = MI.getOperand(0).getReg();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
            .add(MI.getOperand(1));

    if (MI.getOperand(1).getTargetFlags() & AArch64II::MO_TAGGED) {
      // MO_TAGGED on the page indicates a tagged address. Set the tag now.
      // We do so by creating a MOVK that sets bits 48-63 of the register to
      // (global address + 0x100000000 - PC) >> 48. This assumes that we're in
      // the small code model so we can assume a binary size of <= 4GB, which
      // makes the untagged PC-relative offset positive. The binary must also
      // be loaded into address range [0, 2^48). Both of these properties need
      // to be ensured at runtime when using tagged addresses.
      auto Tag = MI.getOperand(1);
      Tag.setTargetFlags(AArch64II::MO_PREL | AArch64II::MO_G3);
      Tag.setOffset(0x100000000);
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi), DstReg)
          .addReg(DstReg)
          .add(Tag)
          .addImm(48);
    }

    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
            .add(MI.getOperand(0))
            .addReg(DstReg)
            .add(MI.getOperand(2))
            .addImm(0);

    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::ADDlowTLS:
    // Produce a plain ADD.
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .addImm(0);
    MI.eraseFromParent();
    return true;

  case AArch64::MOVbaseTLS: {
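    // Read the thread pointer from the system register matching the target's
    // exception level, e.g. mrs xDst, TPIDR_EL0 in the default case.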
    Register DstReg = MI.getOperand(0).getReg();
    auto SysReg = AArch64SysReg::TPIDR_EL0;
    MachineFunction *MF = MBB.getParent();
    if (MF->getTarget().getTargetTriple().isOSFuchsia() &&
        MF->getTarget().getCodeModel() == CodeModel::Kernel)
      SysReg = AArch64SysReg::TPIDR_EL1;
    else if (MF->getSubtarget<AArch64Subtarget>().useEL3ForTP())
      SysReg = AArch64SysReg::TPIDR_EL3;
    else if (MF->getSubtarget<AArch64Subtarget>().useEL2ForTP())
      SysReg = AArch64SysReg::TPIDR_EL2;
    else if (MF->getSubtarget<AArch64Subtarget>().useEL1ForTP())
      SysReg = AArch64SysReg::TPIDR_EL1;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MRS), DstReg)
        .addImm(SysReg);
    MI.eraseFromParent();
    return true;
  }

  case AArch64::MOVi32imm:
    return expandMOVImm(MBB, MBBI, 32);
  case AArch64::MOVi64imm:
    return expandMOVImm(MBB, MBBI, 64);
  case AArch64::RET_ReallyLR: {
    // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
    // function and missing live-ins. We are fine in practice because callee
    // saved register handling ensures the register value is restored before
    // RET, but we need the undef flag here to appease the MachineVerifier
    // liveness checks.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
            .addReg(AArch64::LR, RegState::Undef);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::CMP_SWAP_8:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_16:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_32:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
                          AArch64::SUBSWrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_64:
    return expandCMP_SWAP(MBB, MBBI,
                          AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::XZR, NextMBBI);
  case AArch64::CMP_SWAP_128:
    return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);

  case AArch64::AESMCrrTied:
  case AArch64::AESIMCrrTied: {
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(),
                TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
                                                          AArch64::AESIMCrr))
            .add(MI.getOperand(0))
            .add(MI.getOperand(1));
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::IRGstack: {
    MachineFunction &MF = *MBB.getParent();
    const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
    const AArch64FrameLowering *TFI =
        MF.getSubtarget<AArch64Subtarget>().getFrameLowering();

    // IRG does not allow immediate offset. getTaggedBasePointerOffset should
    // almost always point to SP-after-prologue; if not, emit a longer
    // instruction sequence.
    int BaseOffset = -AFI->getTaggedBasePointerOffset();
    unsigned FrameReg;
    StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
        MF, BaseOffset, false /*isFixed*/, false /*isSVE*/, FrameReg,
        /*PreferFP=*/false,
        /*ForSimm=*/true);
    Register SrcReg = FrameReg;
    if (FrameRegOffset) {
      // Use output register as temporary.
      SrcReg = MI.getOperand(0).getReg();
      emitFrameOffset(MBB, &MI, MI.getDebugLoc(), SrcReg, FrameReg,
                      FrameRegOffset, TII);
    }
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::IRG))
        .add(MI.getOperand(0))
        .addUse(SrcReg)
        .add(MI.getOperand(2));
    MI.eraseFromParent();
    return true;
  }
  case AArch64::TAGPstack: {
    int64_t Offset = MI.getOperand(2).getImm();
    BuildMI(MBB, MBBI, MI.getDebugLoc(),
            TII->get(Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .addImm(std::abs(Offset))
        .add(MI.getOperand(4));
    MI.eraseFromParent();
    return true;
  }
  case AArch64::STGloop_wback:
  case AArch64::STZGloop_wback:
    return expandSetTagLoop(MBB, MBBI, NextMBBI);
  case AArch64::STGloop:
  case AArch64::STZGloop:
    report_fatal_error(
        "Non-writeback variants of STGloop / STZGloop should not "
        "survive past PrologEpilogInserter.");
  }
  return false;
}
/// Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}
bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());

  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

/// Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
  return new AArch64ExpandPseudo();
}