//===- SPUInstrInfo.cpp - Cell SPU Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Cell SPU implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SPURegisterNames.h"
#include "SPUInstrInfo.h"
#include "SPUInstrBuilder.h"
#include "SPUTargetMachine.h"
#include "SPUHazardRecognizers.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/MC/MCContext.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRINFO_MC_DESC
#include "SPUGenInstrInfo.inc"

using namespace llvm;
namespace {
  //! Predicate for an unconditional branch instruction
  inline bool isUncondBranch(const MachineInstr *I) {
    unsigned opc = I->getOpcode();

    return (opc == SPU::BR
            || opc == SPU::BRA
            || opc == SPU::BI);
  }

  //! Predicate for a conditional branch instruction
  inline bool isCondBranch(const MachineInstr *I) {
    unsigned opc = I->getOpcode();

    return (opc == SPU::BRNZr32
            || opc == SPU::BRNZv4i32
            || opc == SPU::BRZr32
            || opc == SPU::BRZv4i32
            || opc == SPU::BRHNZr16
            || opc == SPU::BRHNZv8i16
            || opc == SPU::BRHZr16
            || opc == SPU::BRHZv8i16);
  }
}
SPUInstrInfo::SPUInstrInfo(SPUTargetMachine &tm)
  : SPUGenInstrInfo(SPU::ADJCALLSTACKDOWN, SPU::ADJCALLSTACKUP),
    TM(tm),
    RI(*TM.getSubtargetImpl(), *this)
{ /* NOP */ }
/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
ScheduleHazardRecognizer *SPUInstrInfo::CreateTargetHazardRecognizer(
  const TargetMachine *TM,
  const ScheduleDAG *DAG) const {
  const TargetInstrInfo *TII = TM->getInstrInfo();
  assert(TII && "No InstrInfo?");
  return new SPUHazardRecognizer(*TII);
}
unsigned
SPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                  int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case SPU::LQDv16i8:
  case SPU::LQDv8i16:
  case SPU::LQDv4i32:
  case SPU::LQDv4f32:
  case SPU::LQDv2f64:
  case SPU::LQDr128:
  case SPU::LQDr64:
  case SPU::LQDr32:
  case SPU::LQDr16: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  }
  return 0;
}
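// isStoreToStackSlot below mirrors the load case above: it only recognizes
// the D-form quadword stores (STQD*) whose operand 1 is an immediate offset
// and whose operand 2 is a frame index, and it reports the stored register
// from operand 0.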
unsigned
SPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                 int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case SPU::STQDv16i8:
  case SPU::STQDv8i16:
  case SPU::STQDv4i32:
  case SPU::STQDv4f32:
  case SPU::STQDv2f64:
  case SPU::STQDr128:
  case SPU::STQDr64:
  case SPU::STQDr32:
  case SPU::STQDr16:
  case SPU::STQDr8: {
    const MachineOperand MOp1 = MI->getOperand(1);
    const MachineOperand MOp2 = MI->getOperand(2);
    if (MOp1.isImm() && MOp2.isFI()) {
      FrameIndex = MOp2.getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  }
  return 0;
}
void SPUInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, DebugLoc DL,
                               unsigned DestReg, unsigned SrcReg,
                               bool KillSrc) const
{
  // We support cross register class moves for our aliases, such as R3 in any
  // reg class to any other reg class containing R3. This is required because
  // we instruction select bitconvert i64 -> f64 as a noop for example, so our
  // types have no specific meaning.

  BuildMI(MBB, I, DL, get(SPU::LRr128), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}
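// This single full-width LRr128 move is sufficient because every SPU register
// class is carved out of the same unified 128-entry, 128-bit register file,
// so copying the whole register is a valid copy regardless of the value type
// currently held in the source register.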
void
SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC,
                                  const TargetRegisterInfo *TRI) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr128 : SPU::STQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr64 : SPU::STQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr32 : SPU::STQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr16 : SPU::STQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::STQDr8 : SPU::STQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx) ? SPU::STQDv16i8 : SPU::STQXv16i8;
  } else {
    llvm_unreachable("Unknown regclass!");
  }

  DebugLoc DL;
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(opc))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}
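// Both the store helper above and the load helper below choose between two
// addressing forms: the D-form instructions (STQD/LQD) encode a small
// immediate offset directly, while the X-form instructions (STQX/LQX) take
// the offset in a register. Frame indices at or beyond
// SPUFrameLowering::maxFrameOffset() are assumed not to fit the D-form
// immediate, so the X-form opcode is selected for them.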
void
SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const
{
  unsigned opc;
  bool isValidFrameIdx = (FrameIdx < SPUFrameLowering::maxFrameOffset());
  if (RC == SPU::GPRCRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr128 : SPU::LQXr128);
  } else if (RC == SPU::R64CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R64FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr64 : SPU::LQXr64);
  } else if (RC == SPU::R32CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R32FPRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr32 : SPU::LQXr32);
  } else if (RC == SPU::R16CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr16 : SPU::LQXr16);
  } else if (RC == SPU::R8CRegisterClass) {
    opc = (isValidFrameIdx ? SPU::LQDr8 : SPU::LQXr8);
  } else if (RC == SPU::VECREGRegisterClass) {
    opc = (isValidFrameIdx) ? SPU::LQDv16i8 : SPU::LQXv16i8;
  } else {
    llvm_unreachable("Unknown regclass in loadRegFromStackSlot!");
  }

  DebugLoc DL;
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(opc), DestReg), FrameIdx);
}
//! Branch analysis
/*!
  \note This code was copied from PPC. There may be more branch analysis for
  CellSPU than what's currently done here.
 */
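// The branch conditions produced by AnalyzeBranch are encoded as two
// operands: Cond[0] carries the conditional-branch opcode as an immediate and
// Cond[1] carries the register being tested. InsertBranch and
// ReverseBranchCondition below decode the same two-operand form.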
bool
SPUInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                            MachineBasicBlock *&FBB,
                            SmallVectorImpl<MachineOperand> &Cond,
                            bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranch(LastInst)) {
      // Check for jump tables
      if (!LastInst->getOperand(0).isMBB())
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (isCondBranch(LastInst)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(1).getMBB();
      DEBUG(errs() << "Pushing LastInst: ");
      DEBUG(LastInst->dump());
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      return false;
    }
    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a conditional and unconditional branch, handle it.
  if (isCondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    DEBUG(errs() << "Pushing SecondLastInst: ");
    DEBUG(SecondLastInst->dump());
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranch(SecondLastInst) && isUncondBranch(LastInst)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
// Search MBB for branch hint labels and branch hint ops and remove them.
static void removeHBR(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
    if (I->getOpcode() == SPU::HBRA ||
        I->getOpcode() == SPU::HBR_LABEL)
      I = MBB.erase(I);   // erase() returns the iterator to the next instr.
    else
      ++I;
  }
}
unsigned
SPUInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  removeHBR(MBB);
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isCondBranch(I) && !isUncondBranch(I))
    return 0;

  // Remove the first branch.
  DEBUG(errs() << "Removing branch: ");
  DEBUG(I->dump());
  I->eraseFromParent();
  I = MBB.end();
  if (I == MBB.begin())
    return 1;

  --I;
  if (!(isCondBranch(I) || isUncondBranch(I)))
    return 1;

  // Remove the second branch.
  DEBUG(errs() << "Removing second branch: ");
  DEBUG(I->dump());
  I->eraseFromParent();
  return 2;
}
/** Find the optimal position for a hint branch instruction in a basic block.
 * This should take into account:
 * -the branch hint delays
 * -congestion of the memory bus
 * -dual-issue scheduling (i.e. avoid insertion of nops)
 * Current implementation is rather simplistic.
 */
static MachineBasicBlock::iterator findHBRPosition(MachineBasicBlock &MBB)
{
  MachineBasicBlock::iterator J = MBB.end();
  for (int i = 0; i < 8; i++) {
    if (J == MBB.begin())
      return J;
    J--;
  }
  return J;
}
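// InsertBranch below uses this helper when it emits a branch hint: an
// HBR_LABEL pseudo is attached to the branch itself and an HBRA
// (hint-for-branch) is placed a few instructions earlier, at the position
// returned above, so the hardware has time to prefetch the branch target.
// The hint is only emitted for blocks larger than eight instructions; the
// TODO in InsertBranch notes that this heuristic is rough.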
unsigned
SPUInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond,
                           DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SPU branch conditions have two components!");

  MachineInstrBuilder MIB;
  //TODO: make a more accurate algorithm.
  bool haveHBR = MBB.size() > 8;

  removeHBR(MBB);
  MCSymbol *branchLabel = MBB.getParent()->getContext().CreateTempSymbol();
  // Add a label just before the branch
  if (haveHBR)
    MIB = BuildMI(&MBB, DL, get(SPU::HBR_LABEL)).addSym(branchLabel);

  // One-way branch.
  if (FBB == 0) {
    if (Cond.empty()) {
      // Unconditional branch
      MIB = BuildMI(&MBB, DL, get(SPU::BR));
      MIB.addMBB(TBB);

      DEBUG(errs() << "Inserted one-way uncond branch: ");
      DEBUG((*MIB).dump());

      // Basic blocks have just one branch, so it is safe to add the hint at
      // the position found by findHBRPosition.
      if (haveHBR) {
        MIB = BuildMI(MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
        MIB.addSym(branchLabel);
        MIB.addMBB(TBB);
      }
    } else {
      // Conditional branch
      MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
      MIB.addReg(Cond[1].getReg()).addMBB(TBB);

      if (haveHBR) {
        MIB = BuildMI(MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
        MIB.addSym(branchLabel);
        MIB.addMBB(TBB);
      }

      DEBUG(errs() << "Inserted one-way cond branch: ");
      DEBUG((*MIB).dump());
    }
    return 1;
  } else {
    MIB = BuildMI(&MBB, DL, get(Cond[0].getImm()));
    MachineInstrBuilder MIB2 = BuildMI(&MBB, DL, get(SPU::BR));

    // Two-way Conditional Branch.
    MIB.addReg(Cond[1].getReg()).addMBB(TBB);
    MIB2.addMBB(FBB);

    if (haveHBR) {
      MIB = BuildMI(MBB, findHBRPosition(MBB), DL, get(SPU::HBRA));
      MIB.addSym(branchLabel);
      MIB.addMBB(FBB);
    }

    DEBUG(errs() << "Inserted conditional branch: ");
    DEBUG((*MIB).dump());
    DEBUG(errs() << "part 2: ");
    DEBUG((*MIB2).dump());
    return 2;
  }
}
//! Reverses a branch's condition, returning false on success.
bool
SPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // Pretty brainless way of inverting the condition, but it works, considering
  // there are only two conditions...
  static struct {
    unsigned Opc;               //! The incoming opcode
    unsigned RevCondOpc;        //! The reversed condition opcode
  } revconds[] = {
    { SPU::BRNZr32,    SPU::BRZr32 },
    { SPU::BRNZv4i32,  SPU::BRZv4i32 },
    { SPU::BRZr32,     SPU::BRNZr32 },
    { SPU::BRZv4i32,   SPU::BRNZv4i32 },
    { SPU::BRHNZr16,   SPU::BRHZr16 },
    { SPU::BRHNZv8i16, SPU::BRHZv8i16 },
    { SPU::BRHZr16,    SPU::BRHNZr16 },
    { SPU::BRHZv8i16,  SPU::BRHNZv8i16 }
  };

  unsigned Opc = unsigned(Cond[0].getImm());
  // Pretty dull mapping between the two conditions that SPU can generate:
  for (int i = sizeof(revconds)/sizeof(revconds[0]) - 1; i >= 0; --i) {
    if (revconds[i].Opc == Opc) {
      Cond[0].setImm(revconds[i].RevCondOpc);
      return false;
    }
  }

  return true;
}