//===- ARCOptAddrMode.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass folds LD/ST + ADD pairs into Pre/Post-increment form of
/// load/store instructions.
//===----------------------------------------------------------------------===//
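
// Illustrative example (the opcode names below are placeholders only; the
// actual post-increment opcode is obtained via ARC::getPostIncOpcode and the
// operand list is rebuilt in changeToAddrMode()). A pair such as
//
//   %v  = LD  %base, 0     ; zero-offset load
//   %nb = ADD %base, 4     ; constant increment of the same base register
//
// is folded into a single load with address write-back that also defines the
// updated base register:
//
//   %v, %nb = LD.ab %base, 4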

#include "ARC.h"
#define GET_INSTRMAP_INFO
#include "ARCInstrInfo.h"
#include "ARCTargetMachine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define OPTADDRMODE_DESC "ARC load/store address mode"
#define OPTADDRMODE_NAME "arc-addr-mode"
#define DEBUG_TYPE "arc-addr-mode"

namespace llvm {
FunctionPass *createARCOptAddrMode();
void initializeARCOptAddrModePass(PassRegistry &);
} // end namespace llvm

namespace {
class ARCOptAddrMode : public MachineFunctionPass {
public:
  static char ID;

  ARCOptAddrMode() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return OPTADDRMODE_DESC; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

private:
  const ARCSubtarget *AST = nullptr;
  const ARCInstrInfo *AII = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  MachineDominatorTree *MDT = nullptr;

  // Tries to combine \p Ldst with an increment of its base register to form a
  // single post-increment instruction.
  MachineInstr *tryToCombine(MachineInstr &Ldst);

  // Returns true if the result of \p Add is not used before \p Ldst.
  bool noUseOfAddBeforeLoadOrStore(const MachineInstr *Add,
                                   const MachineInstr *Ldst);

  // Returns true if load/store instruction \p Ldst can be hoisted up to
  // instruction \p To.
  bool canHoistLoadStoreTo(MachineInstr *Ldst, MachineInstr *To);

  // Returns true if load/store instruction \p Ldst can be sunk down
  // to instruction \p To.
  bool canSinkLoadStoreTo(MachineInstr *Ldst, MachineInstr *To);

  // Checks if instructions \p Ldst and \p Add can be moved to become adjacent.
  // If they can, returns the instruction which does not need to move.
  // If \p Uses is not null, fills it with instructions after \p Ldst which use
  // \p Ldst's base register.
  MachineInstr *canJoinInstructions(MachineInstr *Ldst, MachineInstr *Add,
                                    SmallVectorImpl<MachineInstr *> *Uses);

  // Returns true if all instructions in the \p Uses array can be adjusted
  // to accommodate the increment of register \p BaseReg by \p Incr.
  bool canFixPastUses(const ArrayRef<MachineInstr *> &Uses,
                      MachineOperand &Incr, unsigned BaseReg);

  // Updates all instructions in \p Uses to accommodate the increment
  // of \p BaseReg by \p Offset.
  void fixPastUses(ArrayRef<MachineInstr *> Uses, unsigned BaseReg,
                   int64_t Offset);

  // Changes instruction \p Ldst to its post-increment form.
  // \p NewBase is the register to hold the updated base value.
  // \p NewOffset is the instruction's new offset.
  void changeToAddrMode(MachineInstr &Ldst, unsigned NewOpcode,
                        unsigned NewBase, MachineOperand &NewOffset);

  bool processBasicBlock(MachineBasicBlock &MBB);
};
} // end anonymous namespace

char ARCOptAddrMode::ID = 0;
INITIALIZE_PASS_BEGIN(ARCOptAddrMode, OPTADDRMODE_NAME, OPTADDRMODE_DESC, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(ARCOptAddrMode, OPTADDRMODE_NAME, OPTADDRMODE_DESC, false,
                    false)

// Return true if \p Off can be used as immediate offset
// operand of load/store instruction (S9 literal)
static bool isValidLoadStoreOffset(int64_t Off) { return isInt<9>(Off); }

// Return true if \p Off can be used as immediate operand of
// ADD/SUB instruction (U6 literal)
static bool isValidIncrementOffset(int64_t Off) { return isUInt<6>(Off); }
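
// For reference (a documentation note only, no new functionality): isInt<9>
// accepts the signed range [-256, 255] for the S9 load/store offset, and
// isUInt<6> accepts the unsigned range [0, 63] for the U6 ADD/SUB literal.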

static bool isAddConstantOp(const MachineInstr &MI, int64_t &Amount) {
  int64_t Sign = 1;
  switch (MI.getOpcode()) {
  case ARC::SUB_rru6:
    Sign = -1;
    LLVM_FALLTHROUGH;
  case ARC::ADD_rru6:
    assert(MI.getOperand(2).isImm() && "Expected immediate operand");
    Amount = Sign * MI.getOperand(2).getImm();
    return true;
  default:
    return false;
  }
}

// Return true if \p MI dominates all uses of virtual register \p VReg
static bool dominatesAllUsesOf(const MachineInstr *MI, unsigned VReg,
                               MachineDominatorTree *MDT,
                               MachineRegisterInfo *MRI) {

  assert(Register::isVirtualRegister(VReg) && "Expected virtual register!");

  for (auto it = MRI->use_nodbg_begin(VReg), end = MRI->use_nodbg_end();
       it != end; ++it) {
    MachineInstr *User = it->getParent();
    if (User->isPHI()) {
      // A use in a PHI is really a use at the end of the corresponding
      // predecessor block, so check dominance against that block instead.
      unsigned BBOperandIdx = User->getOperandNo(&*it) + 1;
      MachineBasicBlock *MBB = User->getOperand(BBOperandIdx).getMBB();
      if (MBB->empty()) {
        const MachineBasicBlock *InstBB = MI->getParent();
        assert(InstBB != MBB && "Instruction found in empty MBB");
        if (!MDT->dominates(InstBB, MBB))
          return false;
        continue;
      }
      User = &*MBB->rbegin();
    }

    if (!MDT->dominates(MI, User))
      return false;
  }
  return true;
}

// Return true if \p MI is a load/store instruction with an immediate offset
// which can be adjusted by \p Disp
static bool isLoadStoreThatCanHandleDisplacement(const TargetInstrInfo *TII,
                                                 const MachineInstr &MI,
                                                 int64_t Disp) {
  unsigned BasePos, OffPos;
  if (!TII->getBaseAndOffsetPosition(MI, BasePos, OffPos))
    return false;
  const MachineOperand &MO = MI.getOperand(OffPos);
  if (!MO.isImm())
    return false;
  int64_t Offset = MO.getImm() + Disp;
  return isValidLoadStoreOffset(Offset);
}

bool ARCOptAddrMode::noUseOfAddBeforeLoadOrStore(const MachineInstr *Add,
                                                 const MachineInstr *Ldst) {
  Register R = Add->getOperand(0).getReg();
  return dominatesAllUsesOf(Ldst, R, MDT, MRI);
}

MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) {
  assert((Ldst.mayLoad() || Ldst.mayStore()) && "LD/ST instruction expected");

  unsigned BasePos, OffsetPos;

  LLVM_DEBUG(dbgs() << "[ABAW] tryToCombine " << Ldst);
  if (!AII->getBaseAndOffsetPosition(Ldst, BasePos, OffsetPos)) {
    LLVM_DEBUG(dbgs() << "[ABAW] Not a recognized load/store\n");
    return nullptr;
  }

  MachineOperand &Base = Ldst.getOperand(BasePos);
  MachineOperand &Offset = Ldst.getOperand(OffsetPos);

  assert(Base.isReg() && "Base operand must be register");
  if (!Offset.isImm()) {
    LLVM_DEBUG(dbgs() << "[ABAW] Offset is not immediate\n");
    return nullptr;
  }

  Register B = Base.getReg();
  if (Register::isStackSlot(B) || !Register::isVirtualRegister(B)) {
    LLVM_DEBUG(dbgs() << "[ABAW] Base is not VReg\n");
    return nullptr;
  }

  // TODO: try to generate address preincrement
  if (Offset.getImm() != 0) {
    LLVM_DEBUG(dbgs() << "[ABAW] Non-zero offset\n");
    return nullptr;
  }

  // Look for an ADD/SUB of a constant that updates the same base register and
  // can be folded into the load/store as a post-increment.
  for (auto &Add : MRI->use_nodbg_instructions(B)) {
    int64_t Incr;
    if (!isAddConstantOp(Add, Incr))
      continue;
    if (!isValidLoadStoreOffset(Incr))
      continue;

    SmallVector<MachineInstr *, 8> Uses;
    MachineInstr *MoveTo = canJoinInstructions(&Ldst, &Add, &Uses);

    if (!MoveTo)
      continue;

    if (!canFixPastUses(Uses, Add.getOperand(2), B))
      continue;

    LLVM_DEBUG(MachineInstr *First = &Ldst; MachineInstr *Last = &Add;
               if (MDT->dominates(Last, First)) std::swap(First, Last);
               dbgs() << "[ABAW] Instructions " << *First << " and " << *Last
                      << " combined\n";

    );

    MachineInstr *Result = Ldst.getNextNode();
    if (MoveTo == &Add) {
      Ldst.removeFromParent();
      Add.getParent()->insertAfter(Add.getIterator(), &Ldst);
    }
    if (Result == &Add)
      Result = Result->getNextNode();

    fixPastUses(Uses, B, Incr);

    int NewOpcode = ARC::getPostIncOpcode(Ldst.getOpcode());
    assert(NewOpcode > 0 && "No postincrement form found");
    unsigned NewBaseReg = Add.getOperand(0).getReg();
    changeToAddrMode(Ldst, NewOpcode, NewBaseReg, Add.getOperand(2));
    Add.eraseFromParent();

    return Result;
  }
  return nullptr;
}

MachineInstr *
ARCOptAddrMode::canJoinInstructions(MachineInstr *Ldst, MachineInstr *Add,
                                    SmallVectorImpl<MachineInstr *> *Uses) {
  assert(Ldst && Add && "NULL instruction passed");

  MachineInstr *First = Add;
  MachineInstr *Last = Ldst;
  if (MDT->dominates(Ldst, Add))
    std::swap(First, Last);
  else if (!MDT->dominates(Add, Ldst))
    return nullptr;

  LLVM_DEBUG(dbgs() << "canJoinInstructions: " << *First << *Last);

  unsigned BasePos, OffPos;

  if (!AII->getBaseAndOffsetPosition(*Ldst, BasePos, OffPos)) {
    LLVM_DEBUG(
        dbgs()
        << "[canJoinInstructions] Cannot determine base/offset position\n");
    return nullptr;
  }

  Register BaseReg = Ldst->getOperand(BasePos).getReg();

  // Prohibit this:
  //   v1 = add v0, c
  //   st v1, [v0, 0]
  // and this:
  //   st v0, [v0, 0]
  //   v1 = add v0, c
  if (Ldst->mayStore() && Ldst->getOperand(0).isReg()) {
    Register StReg = Ldst->getOperand(0).getReg();
    if (Add->getOperand(0).getReg() == StReg || BaseReg == StReg) {
      LLVM_DEBUG(dbgs() << "[canJoinInstructions] Store uses result of Add\n");
      return nullptr;
    }
  }

  SmallVector<MachineInstr *, 4> UsesAfterLdst;
  SmallVector<MachineInstr *, 4> UsesAfterAdd;
  for (MachineInstr &MI : MRI->use_nodbg_instructions(BaseReg)) {
    if (&MI == Ldst || &MI == Add)
      continue;
    if (&MI != Add && MDT->dominates(Ldst, &MI))
      UsesAfterLdst.push_back(&MI);
    else if (!MDT->dominates(&MI, Ldst))
      return nullptr;
    if (MDT->dominates(Add, &MI))
      UsesAfterAdd.push_back(&MI);
  }

  MachineInstr *Result = nullptr;

  if (First == Add) {
    // n = add b, i
    // ...
    // x = ld [b, o] or x = ld [n, o]

    if (noUseOfAddBeforeLoadOrStore(First, Last)) {
      Result = Last;
      LLVM_DEBUG(dbgs() << "[canJoinInstructions] Can sink Add down to Ldst\n");
    } else if (canHoistLoadStoreTo(Ldst, Add)) {
      Result = First;
      LLVM_DEBUG(dbgs() << "[canJoinInstructions] Can hoist Ldst to Add\n");
    }
  } else {
    // x = ld [b, o]
    // ...
    // n = add b, i
    Result = First;
    LLVM_DEBUG(dbgs() << "[canJoinInstructions] Can hoist Add to Ldst\n");
  }
  if (Result && Uses)
    *Uses = (Result == Ldst) ? UsesAfterLdst : UsesAfterAdd;
  return Result;
}

bool ARCOptAddrMode::canFixPastUses(const ArrayRef<MachineInstr *> &Uses,
                                    MachineOperand &Incr, unsigned BaseReg) {

  assert(Incr.isImm() && "Expected immediate increment");
  int64_t NewOffset = Incr.getImm();
  for (MachineInstr *MI : Uses) {
    int64_t Dummy;
    if (isAddConstantOp(*MI, Dummy)) {
      if (isValidIncrementOffset(Dummy + NewOffset))
        continue;
      return false;
    }
    if (isLoadStoreThatCanHandleDisplacement(AII, *MI, -NewOffset))
      continue;
    LLVM_DEBUG(dbgs() << "Instruction cannot handle displacement " << -NewOffset
                      << ": " << *MI);
    return false;
  }
  return true;
}

void ARCOptAddrMode::fixPastUses(ArrayRef<MachineInstr *> Uses,
                                 unsigned NewBase, int64_t NewOffset) {

  for (MachineInstr *MI : Uses) {
    int64_t Amount;
    unsigned BasePos, OffPos;
    if (isAddConstantOp(*MI, Amount)) {
      NewOffset += Amount;
      assert(isValidIncrementOffset(NewOffset) &&
             "New offset won't fit into ADD instr");
      BasePos = 1;
      OffPos = 2;
    } else if (AII->getBaseAndOffsetPosition(*MI, BasePos, OffPos)) {
      MachineOperand &MO = MI->getOperand(OffPos);
      assert(MO.isImm() && "expected immediate operand");
      NewOffset += MO.getImm();
      assert(isValidLoadStoreOffset(NewOffset) &&
             "New offset won't fit into LD/ST");
    } else
      llvm_unreachable("unexpected instruction");

    MI->getOperand(BasePos).setReg(NewBase);
    MI->getOperand(OffPos).setImm(NewOffset);
  }
}

bool ARCOptAddrMode::canHoistLoadStoreTo(MachineInstr *Ldst, MachineInstr *To) {
  if (Ldst->getParent() != To->getParent())
    return false;
  MachineBasicBlock::const_iterator MI(To), ME(Ldst),
      End(Ldst->getParent()->end());

  bool IsStore = Ldst->mayStore();
  for (; MI != ME && MI != End; ++MI) {
    if (MI->isDebugValue())
      continue;
    if (MI->mayStore() || MI->isCall() || MI->isInlineAsm() ||
        MI->hasUnmodeledSideEffects())
      return false;
    if (IsStore && MI->mayLoad())
      return false;
  }

  for (auto &O : Ldst->explicit_operands()) {
    if (!O.isReg() || !O.isUse())
      continue;
    MachineInstr *OpDef = MRI->getVRegDef(O.getReg());
    if (!OpDef || !MDT->dominates(OpDef, To))
      return false;
  }
  return true;
}

bool ARCOptAddrMode::canSinkLoadStoreTo(MachineInstr *Ldst, MachineInstr *To) {
  // Can only sink load/store within same BB
  if (Ldst->getParent() != To->getParent())
    return false;
  MachineBasicBlock::const_iterator MI(Ldst), ME(To),
      End(Ldst->getParent()->end());

  bool IsStore = Ldst->mayStore();
  bool IsLoad = Ldst->mayLoad();

  Register ValReg = IsLoad ? Ldst->getOperand(0).getReg() : Register();
  for (; MI != ME && MI != End; ++MI) {
    if (MI->isDebugValue())
      continue;
    if (MI->mayStore() || MI->isCall() || MI->isInlineAsm() ||
        MI->hasUnmodeledSideEffects())
      return false;
    if (IsStore && MI->mayLoad())
      return false;
    if (ValReg && MI->readsVirtualRegister(ValReg))
      return false;
  }
  return true;
}

void ARCOptAddrMode::changeToAddrMode(MachineInstr &Ldst, unsigned NewOpcode,
                                      unsigned NewBase,
                                      MachineOperand &NewOffset) {
  bool IsStore = Ldst.mayStore();
  unsigned BasePos, OffPos;
  MachineOperand Src = MachineOperand::CreateImm(0xDEADBEEF);
  AII->getBaseAndOffsetPosition(Ldst, BasePos, OffPos);

  Register BaseReg = Ldst.getOperand(BasePos).getReg();

  Ldst.RemoveOperand(OffPos);
  Ldst.RemoveOperand(BasePos);

  if (IsStore) {
    Src = Ldst.getOperand(BasePos - 1);
    Ldst.RemoveOperand(BasePos - 1);
  }

  // Rebuild the instruction: new opcode, updated base register (def), the
  // stored value for stores, the old base register (use), and the increment
  // as the new offset.
  Ldst.setDesc(AST->getInstrInfo()->get(NewOpcode));
  Ldst.addOperand(MachineOperand::CreateReg(NewBase, true));
  if (IsStore)
    Ldst.addOperand(Src);
  Ldst.addOperand(MachineOperand::CreateReg(BaseReg, false));
  Ldst.addOperand(NewOffset);
  LLVM_DEBUG(dbgs() << "[ABAW] New Ldst: " << Ldst);
}

bool ARCOptAddrMode::processBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  for (auto MI = MBB.begin(), ME = MBB.end(); MI != ME; ++MI) {
    if (MI->isDebugValue())
      continue;
    if (!MI->mayLoad() && !MI->mayStore())
      continue;
    if (ARC::getPostIncOpcode(MI->getOpcode()) < 0)
      continue;
    MachineInstr *Res = tryToCombine(*MI);
    if (Res) {
      Changed = true;
      // Res points to the next instruction. Rewind to process it.
      MI = std::prev(Res->getIterator());
    }
  }
  return Changed;
}

bool ARCOptAddrMode::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  AST = &MF.getSubtarget<ARCSubtarget>();
  AII = AST->getInstrInfo();
  MRI = &MF.getRegInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  bool Changed = false;
  for (auto &MBB : MF)
    Changed |= processBasicBlock(MBB);
  return Changed;
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createARCOptAddrMode() { return new ARCOptAddrMode(); }
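
// Minimal usage sketch (an assumption about the surrounding wiring, not part
// of this file): the target's pass config is expected to schedule this pass
// before register allocation, since tryToCombine() only rewrites virtual
// base registers, e.g.
//
//   void ARCPassConfig::addPreRegAlloc() {
//     addPass(createARCOptAddrMode());
//   }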