//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H

#include "llvm/MC/MCInstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"

namespace llvm {

class InstrItineraryData;
class LiveVariables;
class MCAsmInfo;
class MachineMemOperand;
class MachineRegisterInfo;
class MDNode;
class MCInst;
class SDNode;
class ScheduleHazardRecognizer;
class SelectionDAG;
class ScheduleDAG;
class TargetRegisterClass;
class TargetRegisterInfo;
class BranchProbability;

template<class T> class SmallVectorImpl;

//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
  TargetInstrInfo(const TargetInstrInfo &);  // DO NOT IMPLEMENT
  void operator=(const TargetInstrInfo &);   // DO NOT IMPLEMENT
public:
  TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1)
    : CallFrameSetupOpcode(CFSetupOpcode),
      CallFrameDestroyOpcode(CFDestroyOpcode) {
  }

  virtual ~TargetInstrInfo();

  /// getRegClass - Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
                                         unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const;

  /// isTriviallyReMaterializable - Return true if the instruction is trivially
  /// rematerializable, meaning it has no side effects and requires no operands
  /// that aren't always available.
  bool isTriviallyReMaterializable(const MachineInstr *MI,
                                   AliasAnalysis *AA = 0) const {
    return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI->getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI, AA) ||
             isReallyTriviallyReMaterializableGeneric(MI, AA)));
  }

protected:
  /// isReallyTriviallyReMaterializable - For instructions with opcodes for
  /// which the M_REMATERIALIZABLE flag is set, this hook lets the target
  /// specify whether the instruction is actually trivially rematerializable,
  /// taking into consideration its operands. This predicate must return false
  /// if the instruction has any side effects other than producing a value, or
  /// if it requires any address registers that are not always available.
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                 AliasAnalysis *AA) const {
    return false;
  }

private:
  /// isReallyTriviallyReMaterializableGeneric - For instructions with opcodes
  /// for which the M_REMATERIALIZABLE flag is set and the target hook
  /// isReallyTriviallyReMaterializable returns false, this function does
  /// target-independent tests to determine if the instruction is really
  /// trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                                AliasAnalysis *AA) const;

public:
  /// getCallFrameSetup/DestroyOpcode - These methods return the opcode of the
  /// frame setup/destroy instructions if they exist (-1 otherwise). Some
  /// targets use pseudo instructions in order to abstract away the difference
  /// between operating with a frame pointer and operating without, through the
  /// use of these two instructions.
  ///
  int getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  int getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
  /// extension instruction. That is, it's like a copy where it's legal for the
  /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns
  /// true, then it's expected the pre-extension value is available as a subreg
  /// of the result register. This also returns the sub-register index in
  /// SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
                                     unsigned &SrcReg, unsigned &DstReg,
                                     unsigned &SubIdx) const {
    return false;
  }

  /// isLoadFromStackSlot - If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// hasLoadFromStackSlot - If the specified machine instruction has
  /// a load from a stack slot, return true along with the FrameIndex
  /// of the loaded stack slot and the machine mem operand containing
  /// the reference. If not, return false. Unlike
  /// isLoadFromStackSlot, this returns true for any instruction that
  /// loads from the stack. This is just a hint, as some cases may be
  /// missed.
  virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
                                    const MachineMemOperand *&MMO,
                                    int &FrameIndex) const {
    return false;
  }

  /// isStoreToStackSlot - If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot being stored
  /// to. If not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
  /// stack locations as well. This uses a heuristic so it isn't
  /// reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// hasStoreToStackSlot - If the specified machine instruction has a
  /// store to a stack slot, return true along with the FrameIndex of
  /// the stack slot being stored to and the machine mem operand
  /// containing the reference. If not, return false. Unlike
  /// isStoreToStackSlot, this returns true for any instruction that
  /// stores to the stack. This is just a hint, as some cases may be
  /// missed.
  virtual bool hasStoreToStackSlot(const MachineInstr *MI,
                                   const MachineMemOperand *&MMO,
                                   int &FrameIndex) const {
    return false;
  }
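
  // A minimal usage sketch (hypothetical caller code, not part of this
  // interface): a pass that wants to recognize simple spill and reload
  // instructions can probe them as below, where 'TII' and 'MI' are assumed to
  // come from the surrounding pass.
  //
  //   int FI;
  //   if (unsigned LoadReg = TII->isLoadFromStackSlot(MI, FI)) {
  //     // MI reloads LoadReg from frame index FI and has no other effects.
  //   } else if (unsigned StoreReg = TII->isStoreToStackSlot(MI, FI)) {
  //     // MI spills StoreReg to frame index FI.
  //   }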

  /// reMaterialize - Re-issue the specified 'original' instruction at the
  /// specific location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubIdx,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const = 0;

  /// scheduleTwoAddrSource - Schedule the copy / re-mat of the source of the
  /// two-address instruction inserted by the two-address pass.
  virtual void scheduleTwoAddrSource(MachineInstr *SrcMI,
                                     MachineInstr *UseMI,
                                     const TargetRegisterInfo &TRI) const {
    // Do nothing.
  }

  /// duplicate - Create a duplicate of the Orig instruction in MF. This is like
  /// MachineFunction::CloneMachineInstr(), but the target may update operands
  /// that are required to be unique.
  ///
  /// The instruction must be duplicable as indicated by isNotDuplicable().
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const = 0;

  /// convertToThreeAddress - This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  virtual MachineInstr *
  convertToThreeAddress(MachineFunction::iterator &MFI,
                        MachineBasicBlock::iterator &MBBI,
                        LiveVariables *LV) const {
    return 0;
  }

  /// commuteInstruction - If a target has any instructions that are
  /// commutable but require converting to different instructions or making
  /// non-trivial changes to commute them, this method can be overloaded to do
  /// that. The default implementation simply swaps the commutable operands.
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned. Do not call this
  /// method for a non-commutable instruction, but there may be some cases
  /// where this method fails and returns null.
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const = 0;

  /// findCommutedOpIndices - If the specified MI is commutable, return the two
  /// operand indices whose values would be swapped. Return false if the
  /// instruction is not in a form which this routine understands.
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const = 0;
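
  // A minimal usage sketch (hypothetical caller code; 'TII' and 'MI' are
  // assumed to come from the surrounding pass):
  //
  //   unsigned Idx1, Idx2;
  //   if (TII->findCommutedOpIndices(MI, Idx1, Idx2)) {
  //     // Commute in place; pass NewMI=true to get a fresh instruction
  //     // instead of modifying MI.
  //     MachineInstr *CommutedMI = TII->commuteInstruction(MI, /*NewMI=*/false);
  //   }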

  /// produceSameValue - Return true if two machine instructions would produce
  /// identical values. By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI = 0) const = 0;

  /// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its succ)
  ///    just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and a
  ///    list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that RemoveBranch and InsertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
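
  // A minimal usage sketch (hypothetical caller code; 'TII' and 'MBB' are
  // assumed to come from the surrounding pass) showing how the out-parameters
  // are typically interpreted:
  //
  //   MachineBasicBlock *TBB = 0, *FBB = 0;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
  //     if (!TBB)
  //       ;  // Case 1: the block just falls through.
  //     else if (Cond.empty())
  //       ;  // Case 2: unconditional branch to TBB.
  //     else
  //       ;  // Cases 3/4: conditional branch to TBB; FBB is set in case 4.
  //   }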

  /// RemoveBranch - Remove the branching code at the end of the specific MBB.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
    assert(0 && "Target didn't implement TargetInstrInfo::RemoveBranch!");
    return 0;
  }

  /// InsertBranch - Insert branch code into the end of the specified
  /// MachineBasicBlock. The operands to this method are the same as those
  /// returned by AnalyzeBranch. This is only invoked in cases where
  /// AnalyzeBranch returns success. It returns the number of instructions
  /// inserted.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where AnalyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const {
    assert(0 && "Target didn't implement TargetInstrInfo::InsertBranch!");
    return 0;
  }
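
  // A minimal sketch of the usual rewrite sequence once AnalyzeBranch has
  // succeeded: reverse the condition if needed, drop the old terminators, and
  // emit the new branch. This is hypothetical caller code; 'TII', 'MBB',
  // 'NewTarget', 'Cond', and 'DL' are assumed to come from the surrounding
  // pass (see ReverseBranchCondition further down in this file).
  //
  //   if (!TII->ReverseBranchCondition(Cond)) {
  //     TII->RemoveBranch(MBB);
  //     TII->InsertBranch(MBB, NewTarget, /*FBB=*/0, Cond, DL);
  //   }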

  /// ReplaceTailWithBranchTo - Delete the instruction pointed to by Tail and
  /// everything after it, replacing it with an unconditional branch to NewDest.
  /// This is used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const = 0;

  /// isLegalToSplitMBBAt - Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// isProfitableToIfCvt - Return true if it's profitable to predicate the
  /// instructions of the specified basic block, whose accumulated instruction
  /// latency is "NumCycles", where the probability of the instructions being
  /// executed is given by Probability.
  virtual
  bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                           unsigned ExtraPredCycles,
                           const BranchProbability &Probability) const {
    return false;
  }

  /// isProfitableToIfCvt - Second variant of isProfitableToIfCvt, this one
  /// checks for the case where two basic blocks from the true and false paths
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability.
  virtual bool
  isProfitableToIfCvt(MachineBasicBlock &TMBB,
                      unsigned NumTCycles, unsigned ExtraTCycles,
                      MachineBasicBlock &FMBB,
                      unsigned NumFCycles, unsigned ExtraFCycles,
                      const BranchProbability &Probability) const {
    return false;
  }

  /// isProfitableToDupForIfCvt - Return true if it's profitable for the
  /// if-converter to duplicate instructions with the specified accumulated
  /// instruction latency in the specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability.
  virtual bool
  isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                            const BranchProbability &Probability) const {
    return false;
  }
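
  // A sketch of how a target override might weigh cycle counts against the
  // BranchProbability argument while staying in integer arithmetic. This is a
  // hypothetical example, not a required implementation; 'MyTargetInstrInfo'
  // and the misprediction penalty of 2 cycles are assumptions.
  //
  //   bool MyTargetInstrInfo::isProfitableToIfCvt(
  //       MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles,
  //       const BranchProbability &Probability) const {
  //     // Roughly: NumCycles + ExtraPredCycles <= Probability * (NumCycles + 2).
  //     // Multiply both sides by the probability's denominator to avoid
  //     // floating point.
  //     uint64_t PredicatedCost =
  //         uint64_t(NumCycles + ExtraPredCycles) * Probability.getDenominator();
  //     uint64_t BranchyCost =
  //         uint64_t(NumCycles + 2) * Probability.getNumerator();
  //     return PredicatedCost <= BranchyCost;
  //   }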

  /// copyPhysReg - Emit instructions to copy a pair of physical registers.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
    assert(0 && "Target didn't implement TargetInstrInfo::copyPhysReg!");
  }

  /// storeRegToStackSlot - Store the specified register of the given register
  /// class to the specified stack frame index. The store instruction is to be
  /// added to the given machine basic block before the specified machine
  /// instruction. If isKill is true, the register operand is the last use and
  /// must be marked kill.
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const {
    assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
  }

  /// loadRegFromStackSlot - Load the specified register of the given register
  /// class from the specified stack frame index. The load instruction is to be
  /// added to the given machine basic block before the specified machine
  /// instruction.
  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const {
    assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
  }
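
  // A minimal sketch of how spill code is typically emitted around a program
  // point (hypothetical caller code; 'TII', 'TRI', 'MBB', 'MI', 'Reg', 'RC',
  // and 'FI' are assumed to come from the surrounding pass):
  //
  //   // Spill Reg before MI, then reload it immediately after MI.
  //   TII->storeRegToStackSlot(MBB, MI, Reg, /*isKill=*/true, FI, RC, TRI);
  //   MachineBasicBlock::iterator After = llvm::next(MI);
  //   TII->loadRegFromStackSlot(MBB, After, Reg, FI, RC, TRI);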

  /// emitFrameIndexDebugValue - Emit a target-dependent form of
  /// DBG_VALUE encoding the address of a frame index. Addresses would
  /// normally be lowered the same way as other addresses on the target,
  /// e.g. in load instructions. For targets that do not support this
  /// the debug info is simply lost.
  /// If you add this for a target you should handle this DBG_VALUE in the
  /// target-specific AsmPrinter code as well; you will probably get invalid
  /// assembly output if you don't.
  virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
                                                 int FrameIx,
                                                 uint64_t Offset,
                                                 const MDNode *MDPtr,
                                                 DebugLoc dl) const {
    return 0;
  }

  /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
  /// slot into the specified machine instruction for the specified operand(s).
  /// If this is possible, a new instruction is returned with the specified
  /// operand folded, otherwise NULL is returned.
  /// The new instruction is inserted before MI, and the client is responsible
  /// for removing the old instruction.
  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                  const SmallVectorImpl<unsigned> &Ops,
                                  int FrameIndex) const;

  /// foldMemoryOperand - Same as the previous version except it allows folding
  /// of any load and store from / to any address, not just from a specific
  /// stack slot.
  MachineInstr* foldMemoryOperand(MachineBasicBlock::iterator MI,
                                  const SmallVectorImpl<unsigned> &Ops,
                                  MachineInstr* LoadMI) const;
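
  // A minimal usage sketch (hypothetical caller code): try to fold a reload
  // from frame index 'FI' into operand 'OpNum' of the instruction at 'MI';
  // 'TII', 'MI', 'OpNum', and 'FI' are assumed to come from the surrounding
  // pass.
  //
  //   SmallVector<unsigned, 1> Ops;
  //   Ops.push_back(OpNum);
  //   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
  //     // FoldedMI was inserted before MI; erasing the original instruction
  //     // is the caller's responsibility.
  //     MI->eraseFromParent();
  //   }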

protected:
  /// foldMemoryOperandImpl - Target-dependent implementation for
  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                          MachineInstr* MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                          int FrameIndex) const {
    return 0;
  }

  /// foldMemoryOperandImpl - Target-dependent implementation for
  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                          MachineInstr* MI,
                                          const SmallVectorImpl<unsigned> &Ops,
                                          MachineInstr* LoadMI) const {
    return 0;
  }

public:
  /// canFoldMemoryOperand - Returns true for the specified load / store if
  /// folding is possible.
  virtual
  bool canFoldMemoryOperand(const MachineInstr *MI,
                            const SmallVectorImpl<unsigned> &Ops) const = 0;

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
    return false;
  }

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode*> &NewNodes) const {
    return false;
  }

  /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
                                              bool UnfoldLoad, bool UnfoldStore,
                                              unsigned *LoadRegIndex = 0) const {
    return 0;
  }

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
  /// to determine if two loads are loading from the same base address. It
  /// should only return true if the base pointers are the same and the
  /// only difference between the two addresses is the offset. It also returns
  /// the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1, int64_t &Offset2) const {
    return false;
  }

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// ReverseBranchCondition - Reverses the branch condition of the specified
  /// condition list, returning false on success and true if it cannot be
  /// reversed.
  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
    return true;
  }

  /// insertNoop - Insert a noop into the instruction stream at the specified
  /// point.
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;

  /// getNoopForMachoTarget - Return the noop instruction to use for a noop.
  virtual void getNoopForMachoTarget(MCInst &NopInst) const {
    // Default to just using 'nop' string.
  }

  /// isPredicated - Returns true if the instruction is already predicated.
  virtual bool isPredicated(const MachineInstr *MI) const {
    return false;
  }

  /// isUnpredicatedTerminator - Returns true if the instruction is a
  /// terminator instruction that has not been predicated.
  virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;

  /// PredicateInstruction - Convert the instruction into a predicated
  /// instruction. It returns true if the operation was successful.
  virtual
  bool PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const = 0;

  /// SubsumesPredicate - Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  virtual
  bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                         const SmallVectorImpl<MachineOperand> &Pred2) const {
    return false;
  }

  /// DefinesPredicate - If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  virtual bool DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
    return false;
  }

  /// isPredicable - Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  virtual bool isPredicable(MachineInstr *MI) const {
    return MI->getDesc().isPredicable();
  }

  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    return true;
  }
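
  // A minimal sketch of how if-conversion-style clients drive the predication
  // hooks (hypothetical caller code; 'TII', 'MI', and 'Cond' -- typically the
  // condition produced by AnalyzeBranch -- are assumed to come from the
  // surrounding pass):
  //
  //   if (!TII->isPredicated(MI) && TII->isPredicable(MI) &&
  //       TII->PredicateInstruction(MI, Cond)) {
  //     // MI now executes only when the condition in Cond holds.
  //   }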

  /// isSchedulingBoundary - Test if the given instruction should be
  /// considered a scheduling boundary. This primarily includes labels and
  /// terminators.
  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const = 0;

  /// Measure the specified inline asm to determine an approximation of its
  /// length.
  virtual unsigned getInlineAsmLength(const char *Str,
                                      const MCAsmInfo &MAI) const;

  /// CreateTargetHazardRecognizer - Allocate and return a hazard recognizer to
  /// use for this target when scheduling the machine instructions before
  /// register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetHazardRecognizer(const TargetMachine *TM,
                               const ScheduleDAG *DAG) const = 0;

  /// CreateTargetPostRAHazardRecognizer - Allocate and return a hazard
  /// recognizer to use for this target when scheduling the machine instructions
  /// after register allocation.
  virtual ScheduleHazardRecognizer*
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG *DAG) const = 0;

  /// AnalyzeCompare - For a comparison instruction, return the source register
  /// in SrcReg and the mask and value it compares against in Mask and Value.
  /// Return true if the comparison instruction can be analyzed.
  virtual bool AnalyzeCompare(const MachineInstr *MI,
                              unsigned &SrcReg, int &Mask, int &Value) const {
    return false;
  }

  /// OptimizeCompareInstr - See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr,
                                    unsigned SrcReg, int Mask, int Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }

  /// FoldImmediate - 'Reg' is known to be defined by a move immediate
  /// instruction; try to fold the immediate into the use instruction.
  virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                             unsigned Reg, MachineRegisterInfo *MRI) const {
    return false;
  }

  /// getNumMicroOps - Return the number of u-operations the given machine
  /// instruction will be decoded to on the target CPU.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr *MI) const;

  /// isZeroCost - Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  /// getOperandLatency - Compute and return the use operand latency of a given
  /// pair of def and use.
  /// In most cases, the static scheduling itinerary is enough to determine the
  /// operand latency. But it may not be possible for instructions with a
  /// variable number of defs / uses.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *UseMI, unsigned UseIdx) const;

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const;

  /// getInstrLatency - Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              const MachineInstr *MI,
                              unsigned *PredCost = 0) const;

  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const;

  /// isHighLatencyDef - Return true if this opcode has high latency to its
  /// result.
  virtual bool isHighLatencyDef(int opc) const { return false; }

  /// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
  /// and a use in the current loop; return true if the target considers
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in
  /// high register pressure situations.
  virtual
  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI, unsigned UseIdx) const {
    return false;
  }

  /// hasLowDefLatency - Compute operand latency of a def of 'Reg'; return true
  /// if the target considers it 'low'.
  virtual
  bool hasLowDefLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx) const;

private:
  int CallFrameSetupOpcode, CallFrameDestroyOpcode;
};

/// TargetInstrInfoImpl - This is the default implementation of
/// TargetInstrInfo, which just provides a couple of default implementations
/// for various methods. This is separated out because it is implemented in
/// libcodegen, not in libtarget.
class TargetInstrInfoImpl : public TargetInstrInfo {
protected:
  TargetInstrInfoImpl(int CallFrameSetupOpcode = -1,
                      int CallFrameDestroyOpcode = -1)
    : TargetInstrInfo(CallFrameSetupOpcode, CallFrameDestroyOpcode) {}
public:
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator OldInst,
                                       MachineBasicBlock *NewDest) const;
  virtual MachineInstr *commuteInstruction(MachineInstr *MI,
                                           bool NewMI = false) const;
  virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;
  virtual bool canFoldMemoryOperand(const MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops) const;
  virtual bool PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const;
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubReg,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const;
  virtual MachineInstr *duplicate(MachineInstr *Orig,
                                  MachineFunction &MF) const;
  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI) const;
  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  bool usePreRAHazardRecognizer() const;

  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetMachine*, const ScheduleDAG*) const;

  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
                                     const ScheduleDAG*) const;
};
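
// A hypothetical sketch of how a backend plugs into this interface: targets
// normally derive from TargetInstrInfoImpl and override the hooks they
// support. The class below is illustrative only and is not part of LLVM.
//
//   class MyTargetInstrInfo : public TargetInstrInfoImpl {
//   public:
//     virtual bool AnalyzeBranch(MachineBasicBlock &MBB,
//                                MachineBasicBlock *&TBB,
//                                MachineBasicBlock *&FBB,
//                                SmallVectorImpl<MachineOperand> &Cond,
//                                bool AllowModify) const;
//     virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
//     virtual unsigned InsertBranch(MachineBasicBlock &MBB,
//                                   MachineBasicBlock *TBB,
//                                   MachineBasicBlock *FBB,
//                                   const SmallVectorImpl<MachineOperand> &Cond,
//                                   DebugLoc DL) const;
//     virtual void copyPhysReg(MachineBasicBlock &MBB,
//                              MachineBasicBlock::iterator MI, DebugLoc DL,
//                              unsigned DestReg, unsigned SrcReg,
//                              bool KillSrc) const;
//   };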

} // End llvm namespace

#endif