//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINSTRINFO_H
#define LLVM_TARGET_TARGETINSTRINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/None.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOutliner.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class AAResults;
class DFAPacketizer;
class InstrItineraryData;
class LiveIntervals;
class LiveVariables;
class MachineLoop;
class MachineMemOperand;
class MachineRegisterInfo;
class MCAsmInfo;
class MCInst;
struct MCSchedModel;
class Module;
class ScheduleDAG;
class ScheduleHazardRecognizer;
class SDNode;
class SelectionDAG;
class RegScavenger;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class TargetSubtargetInfo;

template <class T> class SmallVectorImpl;

using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;

//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
public:
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}
  TargetInstrInfo(const TargetInstrInfo &) = delete;
  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
  virtual ~TargetInstrInfo();

  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  virtual const TargetRegisterClass *
  getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
              const TargetRegisterInfo *TRI, const MachineFunction &MF) const;

  /// Return true if the instruction is trivially rematerializable, meaning it
  /// has no side effects and requires no operands that aren't always available.
  /// This means the only allowed uses are constants and unallocatable physical
  /// registers so that the instruction's result is independent of the place
  /// in the function.
  bool isTriviallyReMaterializable(const MachineInstr &MI,
                                   AAResults *AA = nullptr) const {
    return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI.getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI, AA) ||
             isReallyTriviallyReMaterializableGeneric(MI, AA)));
  }

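  // Illustrative sketch only (not part of the interface): a register
  // allocator might consult this hook when choosing between spilling and
  // rematerializing; `rematerializeAtUse`/`spillToStackSlot` are hypothetical.
  //
  //   if (TII->isTriviallyReMaterializable(DefMI, AA))
  //     rematerializeAtUse(DefMI); // re-compute the value, cheaper than reload
  //   else
  //     spillToStackSlot(DefMI);
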
protected:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set, this hook lets the target specify whether the instruction is actually
  /// trivially rematerializable, taking into consideration its operands. This
  /// predicate must return false if the instruction has any side effects other
  /// than producing a value, or if it requires any address registers that are
  /// not always available.
  /// Requirements must be checked as stated in isTriviallyReMaterializable().
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                 AAResults *AA) const {
    return false;
  }

  /// This method commutes the operands of the given machine instruction MI.
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2.
  ///
  /// If a target has any instructions that are commutable but require
  /// converting to different instructions or making non-trivial changes
  /// to commute them, this method can be overridden to do that.
  /// The default implementation simply swaps the commutable operands.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                               unsigned OpIdx1,
                                               unsigned OpIdx2) const;

  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// The function returns true iff after the result pair redefinition
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);

private:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set and the target hook isReallyTriviallyReMaterializable returns false,
  /// this function does target-independent tests to determine if the
  /// instruction is really trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI,
                                                AAResults *AA) const;

public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (-1 otherwise). Some targets use pseudo instructions in
  /// order to abstract away the difference between operating with a frame
  /// pointer and operating without, through the use of these two instructions.
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// Returns true if the argument is a frame pseudo instruction.
  bool isFrameInstr(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode() ||
           I.getOpcode() == getCallFrameDestroyOpcode();
  }

  /// Returns true if the argument is a frame setup pseudo instruction.
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }

  /// Returns size of the frame associated with the given frame instruction.
  /// For a frame setup instruction this is the frame space set up
  /// after the instruction. For a frame destroy instruction this is the frame
  /// freed by the caller.
  /// Note, in some cases a call frame (or a part of it) may be prepared prior
  /// to the frame setup instruction. It occurs in the calls that involve
  /// inalloca arguments. This function reports only the size of the frame part
  /// that is set up between the frame setup and destroy pseudo instructions.
  int64_t getFrameSize(const MachineInstr &I) const {
    assert(isFrameInstr(I) && "Not a frame instruction");
    assert(I.getOperand(0).getImm() >= 0);
    return I.getOperand(0).getImm();
  }

  /// Returns the total frame size, which is made up of the space set up inside
  /// the pair of frame start-stop instructions and the space that is set up
  /// prior to the pair.
  int64_t getFrameTotalSize(const MachineInstr &I) const {
    if (isFrameSetup(I)) {
      assert(I.getOperand(1).getImm() >= 0 &&
             "Frame size must not be negative");
      return getFrameSize(I) + I.getOperand(1).getImm();
    }
    return getFrameSize(I);
  }

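  // For illustration (a sketch, not part of the interface): a pass scanning a
  // call sequence can account for its stack usage like so.
  //
  //   if (TII->isFrameInstr(MI))
  //     Bytes = TII->isFrameSetup(MI) ? TII->getFrameTotalSize(MI)
  //                                   : TII->getFrameSize(MI);
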
  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  unsigned getReturnOpcode() const { return ReturnOpcode; }

  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;

  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected the pre-extension value is available as a subreg of the result
  /// register. This also returns the sub-register index in SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                                     unsigned &DstReg, unsigned &SubIdx) const {
    return false;
  }

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isLoadFromStackSlot that returns the number of
  /// bytes loaded from the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the load does.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex,
                                       unsigned &MemBytes) const {
    MemBytes = 0;
    return isLoadFromStackSlot(MI, FrameIndex);
  }

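  // Example use (sketch): recognizing a reload so a peephole can reuse the
  // frame index. `FI` and `Reg` are locals introduced for the example.
  //
  //   int FI;
  //   if (unsigned Reg = TII->isLoadFromStackSlot(MI, FI)) {
  //     // MI loads `Reg` from stack slot FI and has no other side effects.
  //   }
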
  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic so it isn't reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a load from a stack slot,
  /// return true along with the FrameIndices of the loaded stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
  /// any instruction that loads from the stack. This is just a hint, as some
  /// cases may be missed.
  virtual bool hasLoadFromStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot. If
  /// not, return 0. This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isStoreToStackSlot that returns the number of
  /// bytes stored to the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the store does.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex,
                                      unsigned &MemBytes) const {
    MemBytes = 0;
    return isStoreToStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a store to a stack slot,
  /// return true along with the FrameIndices of the accessed stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false. Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack. This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }

  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial bigendian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;

  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }

  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different architectures need to override this, and different
  /// micro-architectures can also be finely tuned inside.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }

  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to sink;
  /// this gives the target a hook to override the default behavior with regard
  /// to which instructions should be sunk.
  virtual bool shouldSink(const MachineInstr &MI) const { return true; }

  /// Re-issue the specified 'original' instruction at the
  /// specific location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI, unsigned DestReg,
                             unsigned SubIdx, const MachineInstr &Orig,
                             const TargetRegisterInfo &TRI) const;

  /// Clones the instruction or the whole instruction bundle \p Orig and
  /// inserts it into \p MBB before \p InsertBefore. The target may update
  /// operands that are required to be unique.
  ///
  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertBefore,
                                  const MachineInstr &Orig) const;

  /// This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand. This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                              MachineInstr &MI,
                                              LiveVariables *LV) const {
    return nullptr;
  }

  // This constant can be used as an input value of operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;

  /// This method commutes the operands of the given machine instruction MI.
  ///
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to the special value
  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
  /// any commutable operand. If both arguments are set to
  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
  /// operands; then commutes them if such operands could be found.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// for non-commutable operands.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  MachineInstr *
  commuteInstruction(MachineInstr &MI, bool NewMI = false,
                     unsigned OpIdx1 = CommuteAnyOperandIndex,
                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;

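  // Typical use (sketch): try to commute two explicit operands in place and
  // fall back when the target refuses.
  //
  //   if (MachineInstr *NewMI =
  //           TII->commuteInstruction(MI, /*NewMI=*/false, 1, 2)) {
  //     // Success: with NewMI == false, `NewMI` is `&MI`, commuted in place.
  //   } else {
  //     // The operands could not be commuted; MI is unchanged.
  //   }
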
  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If any of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
  /// looks for 2 commutable operands.
  /// If INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  virtual bool findCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// A pair composed of a register and a sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg.
  struct RegSubRegPair {
    unsigned Reg;
    unsigned SubReg;

    RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
        : Reg(Reg), SubReg(SubReg) {}

    bool operator==(const RegSubRegPair &P) const {
      return Reg == P.Reg && SubReg == P.SubReg;
    }
    bool operator!=(const RegSubRegPair &P) const { return !(*this == P); }
  };

  /// A pair composed of a pair of a register and a sub-register index,
  /// and another sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
  struct RegSubRegPairAndIdx : RegSubRegPair {
    unsigned SubIdx;

    RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
                        unsigned SubIdx = 0)
        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
  };

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
  /// flag are not added to this list.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;

  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                              RegSubRegPairAndIdx &InputReg) const;

  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                             RegSubRegPair &BaseReg,
                             RegSubRegPairAndIdx &InsertedReg) const;

  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;

  /// \returns true if a branch instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }

  /// \returns The block that branch instruction \p MI jumps to.
  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
    llvm_unreachable("target did not implement");
  }

  /// Insert an unconditional indirect branch at the end of \p MBB to \p
  /// NewDestBB. \p BrOffset indicates the offset of \p NewDestBB relative to
  /// the offset of the position to insert the new branch.
  ///
  /// \returns The number of bytes added to the block.
  virtual unsigned insertIndirectBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock &NewDestBB,
                                        const DebugLoc &DL,
                                        int64_t BrOffset = 0,
                                        RegScavenger *RS = nullptr) const {
    llvm_unreachable("target did not implement");
  }

  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target). Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its
  ///    successor), just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and a
  ///    list of operands that evaluate the condition. These operands can be
  ///    passed to other TargetInstrInfo methods to create new branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition. These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }

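  // The usual calling pattern, for illustration:
  //
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) {
  //     // Success: TBB/FBB/Cond now describe one of the four cases above.
  //   }
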
  /// Represents a predicate at the MachineFunction level. The control flow a
  /// MachineBranchPredicate represents is:
  ///
  ///  Reg = LHS `Predicate` RHS           == ConditionDef
  ///  if Reg then goto TrueDest else goto FalseDest
  ///
  struct MachineBranchPredicate {
    enum ComparePredicate {
      PRED_EQ,     // True if two values are equal
      PRED_NE,     // True if two values are not equal
      PRED_INVALID // Sentinel value
    };

    ComparePredicate Predicate = PRED_INVALID;
    MachineOperand LHS = MachineOperand::CreateImm(0);
    MachineOperand RHS = MachineOperand::CreateImm(0);
    MachineBasicBlock *TrueDest = nullptr;
    MachineBasicBlock *FalseDest = nullptr;
    MachineInstr *ConditionDef = nullptr;

    /// SingleUseCondition is true if ConditionDef is dead except for the
    /// branch(es) at the end of the basic block.
    bool SingleUseCondition = false;

    explicit MachineBranchPredicate() = default;
  };

  /// Analyze the branching code at the end of MBB and parse it into the
  /// MachineBranchPredicate structure if possible. Returns false on success
  /// and true on failure.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                                      MachineBranchPredicate &MBP,
                                      bool AllowModify = false) const {
    return true;
  }

  /// Remove the branching code at the end of the specified MBB.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  /// If \p BytesRemoved is non-null, report the change in code size from the
  /// removed instructions.
  virtual unsigned removeBranch(MachineBasicBlock &MBB,
                                int *BytesRemoved = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
  }

  /// Insert branch code into the end of the specified MachineBasicBlock. The
  /// operands to this method are the same as those returned by AnalyzeBranch.
  /// This is only invoked in cases where AnalyzeBranch returns success. It
  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
  /// report the change in code size from the added instructions.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where AnalyzeBranch doesn't apply because there was no original
  /// branch to analyze. At least this much must be implemented, else tail
  /// merging needs to be disabled.
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                ArrayRef<MachineOperand> Cond,
                                const DebugLoc &DL,
                                int *BytesAdded = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
  }

  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *DestBB,
                                     const DebugLoc &DL,
                                     int *BytesAdded = nullptr) const {
    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
                        BytesAdded);
  }

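  // Sketch of retargeting an analyzable terminator: pair removeBranch with
  // insertBranch (DL is some debug location; error handling elided).
  //
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) {
  //     TII->removeBranch(MBB);
  //     TII->insertBranch(MBB, NewTBB, FBB, Cond, DL);
  //   }
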
  /// Object returned by analyzeLoopForPipelining. Allows software pipelining
  /// implementations to query attributes of the loop being pipelined and to
  /// apply target-specific updates to the loop once pipelining is complete.
  class PipelinerLoopInfo {
  public:
    virtual ~PipelinerLoopInfo();

    /// Return true if the given instruction should not be pipelined and should
    /// be ignored. An example could be a loop comparison, or induction variable
    /// update with no users being pipelined.
    virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;

    /// Create a condition to determine if the trip count of the loop is greater
    /// than TC.
    ///
    /// If the trip count is statically known to be greater than TC, return
    /// true. If the trip count is statically known to be not greater than TC,
    /// return false. Otherwise return nullopt and fill out Cond with the test
    /// condition.
    virtual Optional<bool>
    createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                    SmallVectorImpl<MachineOperand> &Cond) = 0;

    /// Modify the loop such that the trip count is
    /// OriginalTC + TripCountAdjust.
    virtual void adjustTripCount(int TripCountAdjust) = 0;

    /// Called when the loop's preheader has been modified to NewPreheader.
    virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;

    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    virtual void disposed() = 0;
  };

  /// Analyze loop L, which must be a single-basic-block loop, and if the
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  virtual std::unique_ptr<PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
    return nullptr;
  }

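  // How a software pipelining pass might consume this hook (sketch):
  //
  //   if (auto LI = TII->analyzeLoopForPipelining(LoopBB)) {
  //     SmallVector<MachineOperand, 4> Cond;
  //     Optional<bool> Known =
  //         LI->createTripCountGreaterCondition(TC, *MBB, Cond);
  //     // Known == true/false: trip count statically known; otherwise test
  //     // the condition in Cond at run time.
  //   }
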
  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }

  /// Generate code to reduce the loop iteration by one and check if the loop
  /// is finished. Return the value/register of the new loop count. We need
  /// this function when peeling off one or more iterations of a loop. This
  /// function assumes the nth iteration is peeled first.
  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
                                   MachineBasicBlock &PreHeader,
                                   MachineInstr *IndVar, MachineInstr &Cmp,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
                                   unsigned Iter, unsigned MaxIter) const {
    llvm_unreachable("Target didn't implement ReduceLoopCount");
  }

  /// Delete the instruction OldInst and everything after it, replacing it with
  /// an unconditional branch to NewDest. This is used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// Return true if it's profitable to predicate
  /// instructions with accumulated instruction latency of "NumCycles"
  /// of the specified basic block, where the probability of the instructions
  /// being executed is given by Probability, and Confidence is a measure
  /// of our confidence that it will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from true and false path
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable for if-converter to duplicate instructions
  /// of specified accumulated instruction latencies in the specified MBB to
  /// enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability) const {
    return false;
  }

  /// Return the increase in code size needed to predicate a contiguous run of
  /// NumInsts instructions.
  virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                    unsigned NumInsts) const {
    return 0;
  }

  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    return getInstSizeInBytes(MI);
  }

  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1.
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where select instruction would be inserted.
  /// @param Cond        Condition returned by AnalyzeBranch.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               ArrayRef<MachineOperand> Cond, unsigned TrueReg,
                               unsigned FalseReg, int &CondCycles,
                               int &TrueCycles, int &FalseCycles) const {
    return false;
  }

  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by AnalyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
                            unsigned TrueReg, unsigned FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }

  /// Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by predicating
  /// the instruction defining one of the operands. Such targets should set
  /// Optimizable.
  ///
  /// @param MI          Select instruction to analyze.
  /// @param Cond        Condition controlling the select.
  /// @param TrueOp      Operand number of the value selected when Cond is true.
  /// @param FalseOp     Operand number of the value selected when Cond is
  ///                    false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
  virtual bool analyzeSelect(const MachineInstr &MI,
                             SmallVectorImpl<MachineOperand> &Cond,
                             unsigned &TrueOp, unsigned &FalseOp,
                             bool &Optimizable) const {
    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
    return true;
  }

  /// Given a select instruction that was understood by
  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
  /// merging it with one of its operands. Returns NULL on failure.
  ///
  /// When successful, returns the new select instruction. The client is
  /// responsible for deleting MI.
  ///
  /// If both sides of the select can be optimized, PreferFalse is used to pick
  /// a side.
  ///
  /// @param MI          Optimizable select instruction.
  /// @param NewMIs      Set that records all MIs in the basic block up to \p
  ///                    MI. Has to be updated with any newly created MI or
  ///                    deleted ones.
  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
  /// @returns Optimized instruction or NULL.
  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
                                       bool PreferFalse = false) const {
    // This function must be implemented if Optimizable is ever set.
    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
  }

  /// Emit instructions to copy a pair of physical registers.
  ///
  /// This function should support copies within any legal register class as
  /// well as any cross-class copies created during instruction selection.
  ///
  /// The source and destination registers may overlap, which may require a
  /// careful implementation when multiple copy instructions are required for
  /// large registers. See for example the ARM target.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
  }

protected:
  /// Target-dependent implementation for IsCopyInstr.
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return true along with
  /// @Source machine operand and @Destination machine operand.
  virtual bool isCopyInstrImpl(const MachineInstr &MI,
                               const MachineOperand *&Source,
                               const MachineOperand *&Destination) const {
    return false;
  }

public:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another register, return true along with
  /// @Source machine operand and @Destination machine operand.
  /// For a COPY instruction the method naturally returns true; for all other
  /// instructions the method calls the target-dependent implementation.
  bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Source,
                   const MachineOperand *&Destination) const {
    if (MI.isCopy()) {
      Destination = &MI.getOperand(0);
      Source = &MI.getOperand(1);
      return true;
    }
    return isCopyInstrImpl(MI, Source, Destination);
  }

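  // Example query (sketch): drop a trivially redundant register-to-register
  // move. Subregister indices would also need checking in real code.
  //
  //   const MachineOperand *Src, *Dst;
  //   if (TII->isCopyInstr(MI, Src, Dst) && Src->getReg() == Dst->getReg())
  //     MI.eraseFromParent();
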
  /// Store the specified register of the given register class to the specified
  /// stack frame index. The store instruction is to be added to the given
  /// machine basic block before the specified machine instruction. If isKill
  /// is true, the register operand is the last use and must be marked kill.
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::storeRegToStackSlot!");
  }

  /// Load the specified register of the given register class from the specified
  /// stack frame index. The load instruction is to be added to the given
  /// machine basic block before the specified machine instruction.
  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::loadRegFromStackSlot!");
  }

  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }

  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  /// For example, X86 may want to return true if it can fold
  ///   movl (%esp), %eax
  ///   subb %al, ...
  /// into:
  ///   subb (%esp), ...
  ///
  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
  /// reject subregs - but since this behavior used to be enforced in the
  /// target-independent code, moving this responsibility to the targets
  /// has the potential of causing nasty silent breakage in out-of-tree targets.
  virtual bool isSubregFoldable() const { return false; }

  /// Attempt to fold a load or store of the specified stack
  /// slot into the specified machine instruction for the specified operand(s).
  /// If this is possible, a new instruction is returned with the specified
  /// operand folded, otherwise NULL is returned.
  /// The new instruction is inserted before MI, and the client is responsible
  /// for removing the old instruction.
  /// If VRM is passed, the assigned physregs can be inspected by target to
  /// decide on using an opcode (note that those assignments can still change).
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  int FI,
                                  LiveIntervals *LIS = nullptr,
                                  VirtRegMap *VRM = nullptr) const;

  /// Same as the previous version except it allows folding of any load and
  /// store from / to any address, not just from a specific stack slot.
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  MachineInstr &LoadMI,
                                  LiveIntervals *LIS = nullptr) const;

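  // Typical register-allocator use (sketch): fold a reload into the using
  // instruction instead of emitting a separate load; per the contract above,
  // the client deletes the original instruction on success.
  //
  //   if (MachineInstr *FoldMI =
  //           TII->foldMemoryOperand(MI, {OpNo}, StackSlotFI, LIS)) {
  //     MI.eraseFromParent();
  //   }
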
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in \p Root. All potential patterns are
  /// returned in the \p Patterns vector. Patterns should be sorted in priority
  /// order since the pattern evaluator stops checking as soon as it finds a
  /// faster sequence.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Patterns - Vector of possible combination patterns
  virtual bool getMachineCombinerPatterns(
      MachineInstr &Root,
      SmallVectorImpl<MachineCombinerPattern> &Patterns) const;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;

  /// Return true if the input \p Inst is part of a chain of dependent ops
  /// that are suitable for reassociation, otherwise return false.
  /// If the instruction's operands must be commuted to have a previous
  /// instruction of the same type define the first source operand, \p Commuted
  /// will be set to true.
  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;

  /// Return true when \p Inst is both associative and commutative.
  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
    return false;
  }

  /// Return true when \p Inst has reassociable operands in the same \p MBB.
  virtual bool hasReassociableOperands(const MachineInstr &Inst,
                                       const MachineBasicBlock *MBB) const;

  /// Return true when \p Inst has a reassociable sibling.
  bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence. The client
  /// has to decide whether the actual replacement is beneficial or not.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Pattern - Combination pattern for Root
  /// \param InsInstrs - Vector of new instructions that implement the pattern
  /// \param DelInstrs - Old instructions, including Root, that could be
  /// replaced by InsInstrs
  /// \param InstIdxForVirtReg - map of virtual register to instruction in
  /// InsInstrs that defines it
  virtual void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;

  /// Attempt to reassociate \p Root and \p Prev according to \p Pattern to
  /// reduce critical path length.
  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
                      MachineCombinerPattern Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;

  /// This is an architecture-specific helper function of reassociateOps.
  /// Set special operand attributes for new instructions after reassociation.
  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                                     MachineInstr &NewMI1,
                                     MachineInstr &NewMI2) const {}

  /// Return true when a target supports MachineCombiner.
  virtual bool useMachineCombiner() const { return false; }

  /// Return true if the given SDNode can be copied during scheduling
  /// even if it has glue.
  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }

protected:
  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const {
    return nullptr;
  }

  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const {
    return nullptr;
  }

  /// Target-dependent implementation of getRegSequenceInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequenceLike().
  ///
  /// \see TargetInstrInfo::getRegSequenceInputs.
  virtual bool getRegSequenceLikeInputs(
      const MachineInstr &MI, unsigned DefIdx,
      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
    return false;
  }

  /// Target-dependent implementation of getExtractSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isExtractSubregLike().
  ///
  /// \see TargetInstrInfo::getExtractSubregInputs.
  virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
                                          unsigned DefIdx,
                                          RegSubRegPairAndIdx &InputReg) const {
    return false;
  }

  /// Target-dependent implementation of getInsertSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isInsertSubregLike().
  ///
  /// \see TargetInstrInfo::getInsertSubregInputs.
  virtual bool
  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
                            RegSubRegPair &BaseReg,
                            RegSubRegPairAndIdx &InsertedReg) const {
    return false;
  }

public:
  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
  /// (e.g. stack) the target returns the corresponding address space.
  virtual unsigned
  getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
    return 0;
  }

  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store or a load and a store into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  virtual bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
    return false;
  }

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode *> &NewNodes) const {
    return false;
  }

  /// Returns the opcode of the would be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const {
    return 0;
  }

  /// This is used by the pre-regalloc scheduler to determine if two loads are
  /// loading from the same base address. It should only return true if the base
  /// pointers are the same and the only difference between the two addresses
  /// is the offset. It also returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const {
    return false;
  }

  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory.
  virtual bool getMemOperandWithOffset(const MachineInstr &MI,
                                       const MachineOperand *&BaseOp,
                                       int64_t &Offset,
                                       const TargetRegisterInfo *TRI) const {
    return false;
  }

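  // Sketch: deciding whether two memory accesses share a base.
  //
  //   const MachineOperand *BaseA, *BaseB;
  //   int64_t OffA, OffB;
  //   if (TII->getMemOperandWithOffset(MIa, BaseA, OffA, TRI) &&
  //       TII->getMemOperandWithOffset(MIb, BaseB, OffB, TRI) &&
  //       BaseA->isIdenticalTo(*BaseB)) {
  //     // Same base operand; OffA and OffB order the two accesses.
  //   }
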
  /// Return true if the instruction contains a base register and offset. If
  /// true, the function also sets the operand position in the instruction
  /// for the base register and offset.
  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
                                        unsigned &BasePos,
                                        unsigned &OffsetPos) const {
    return false;
  }

  /// If the instruction is an increment of a constant value, return the amount.
  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
    return false;
  }

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent. Note that you have to add:
  ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  /// or
  ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  /// to TargetPassConfig::createMachineScheduler() to have an effect.
  virtual bool shouldClusterMemOps(const MachineOperand &BaseOp1,
                                   const MachineOperand &BaseOp2,
                                   unsigned NumLoads) const {
    llvm_unreachable("target did not implement shouldClusterMemOps()");
  }

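  // A sketch of the createMachineScheduler() override mentioned above
  // (`MyPassConfig` is a hypothetical target's TargetPassConfig subclass):
  //
  //   ScheduleDAGInstrs *
  //   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  //     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  //     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  //     return DAG;
  //   }
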
  /// Reverses the branch condition of the specified condition list,
  /// returning false on success and true if it cannot be reversed.
  virtual bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
    return true;
  }

  /// Insert a noop into the instruction stream at the specified point.
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;

  /// Return the noop instruction to use for a noop.
  virtual void getNoop(MCInst &NopInst) const;

  /// Return true for post-incremented instructions.
  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }

  /// Returns true if the instruction is already predicated.
  virtual bool isPredicated(const MachineInstr &MI) const { return false; }

  /// Returns true if the instruction is a
  /// terminator instruction that has not been predicated.
  virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;

  /// Returns true if MI is an unconditional tail call.
  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
    return false;
  }

  /// Returns true if the tail call can be made conditional on BranchCond.
  virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
                                          const MachineInstr &TailCall) const {
    return false;
  }

  /// Replace the conditional branch in MBB with a conditional tail call.
  virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
                                         SmallVectorImpl<MachineOperand> &Cond,
                                         const MachineInstr &TailCall) const {
    llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
  }

  /// Convert the instruction into a predicated instruction.
  /// It returns true if the operation was successful.
  virtual bool PredicateInstruction(MachineInstr &MI,
                                    ArrayRef<MachineOperand> Pred) const;

  /// Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
    return false;
  }

  /// If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  virtual bool DefinesPredicate(MachineInstr &MI,
                                std::vector<MachineOperand> &Pred) const {
    return false;
  }

  /// Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  virtual bool isPredicable(const MachineInstr &MI) const {
    return MI.getDesc().isPredicable();
  }

  /// Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    return true;
  }

1324 /// Test if the given instruction should be considered a scheduling boundary.
1325 /// This primarily includes labels and terminators.
1326 virtual bool isSchedulingBoundary(const MachineInstr &MI,
1327 const MachineBasicBlock *MBB,
1328 const MachineFunction &MF) const;
1330 /// Measure the specified inline asm to determine an approximation of its
1331 /// length.
1332 virtual unsigned getInlineAsmLength(
1333 const char *Str, const MCAsmInfo &MAI,
1334 const TargetSubtargetInfo *STI = nullptr) const;
1336 /// Allocate and return a hazard recognizer to use for this target when
1337 /// scheduling the machine instructions before register allocation.
1338 virtual ScheduleHazardRecognizer *
1339 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
1340 const ScheduleDAG *DAG) const;
1342 /// Allocate and return a hazard recognizer to use for this target when
1343 /// scheduling the machine instructions before register allocation.
1344 virtual ScheduleHazardRecognizer *
1345 CreateTargetMIHazardRecognizer(const InstrItineraryData *,
1346 const ScheduleDAG *DAG) const;
1348 /// Allocate and return a hazard recognizer to use for this target when
1349 /// scheduling the machine instructions after register allocation.
1350 virtual ScheduleHazardRecognizer *
1351 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
1352 const ScheduleDAG *DAG) const;

  /// Allocate and return a hazard recognizer to be used by non-scheduling
  /// passes.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Provide a global flag for disabling the PreRA hazard recognizer that
  /// targets may choose to honor.
  bool usePreRAHazardRecognizer() const;

  /// For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if the instruction has two register operands, and
  /// the value it compares against in Value. Return true if the comparison
  /// instruction can be analyzed.
  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                              unsigned &SrcReg2, int &Mask, int &Value) const {
    return false;
  }

  /// See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
                                    unsigned SrcReg2, int Mask, int Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }
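
  // Caller-side sketch (roughly how a peephole pass drives these two hooks;
  // the surrounding variables are assumptions for the example):
  //
  //   unsigned SrcReg = 0, SrcReg2 = 0;
  //   int Mask = 0, Value = 0;
  //   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value))
  //     // Try to fold the compare into a flag-setting def of SrcReg.
  //     TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI);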

  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }

  /// Try to remove the load by folding it to a register operand at the use.
  /// We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
  virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                          const MachineRegisterInfo *MRI,
                                          unsigned &FoldAsLoadDefReg,
                                          MachineInstr *&DefMI) const {
    return nullptr;
  }

  /// 'Reg' is known to be defined by a move immediate instruction,
  /// try to fold the immediate into the use instruction.
  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
  /// then the caller may assume that DefMI has been erased from its parent
  /// block. The caller may assume that it will not be erased by this
  /// function otherwise.
  virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                             unsigned Reg, MachineRegisterInfo *MRI) const {
    return false;
  }
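
  // Illustrative sketch of the erasure contract: a caller that knows Reg is
  // defined by a move-immediate may try the fold, and must treat DefMI as
  // erased only when both conditions below hold (names are assumptions):
  //
  //   bool HadOneUse = MRI->hasOneNonDBGUse(Reg);
  //   if (TII->FoldImmediate(UseMI, DefMI, Reg, MRI) && HadOneUse) {
  //     // DefMI has been erased by the target; do not touch it again.
  //   }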

  /// Return the number of micro-operations the given machine
  /// instruction will be decoded to on the target CPU. The itinerary's
  /// IssueWidth is the number of microops that can be dispatched each
  /// cycle. An instruction with zero microops takes no dispatch resources.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr &MI) const;

  /// Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const;

  /// Compute and return the use operand latency of a given pair of def and
  /// use. In most cases, the static scheduling itinerary is enough to
  /// determine the operand latency. But that may not be possible for
  /// instructions with a variable number of defs / uses.
  ///
  /// This is a raw interface to the itinerary that may be directly overridden
  /// by a target. Use computeOperandLatency to get the best estimate of
  /// latency.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                const MachineInstr &DefMI, unsigned DefIdx,
                                const MachineInstr &UseMI,
                                unsigned UseIdx) const;

  /// Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr &MI,
                                   unsigned *PredCost = nullptr) const;

  virtual unsigned getPredicationCost(const MachineInstr &MI) const;

  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const;

  /// Return the default expected latency for a def based on its opcode.
  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
                             const MachineInstr &DefMI) const;

  int computeDefOperandLatency(const InstrItineraryData *ItinData,
                               const MachineInstr &DefMI) const;

  /// Return true if this opcode has high latency to its result.
  virtual bool isHighLatencyDef(int opc) const { return false; }

  /// Compute operand latency between a def of 'Reg'
  /// and a use in the current loop. Return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                     const MachineRegisterInfo *MRI,
                                     const MachineInstr &DefMI, unsigned DefIdx,
                                     const MachineInstr &UseMI,
                                     unsigned UseIdx) const {
    return false;
  }

  /// Compute operand latency of a def of 'Reg'. Return true
  /// if the target considered it 'low'.
  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                                const MachineInstr &DefMI,
                                unsigned DefIdx) const;

  /// Perform target-specific instruction verification.
  virtual bool verifyInstruction(const MachineInstr &MI,
                                 StringRef &ErrInfo) const {
    return true;
  }

  /// Return the current execution domain and bit mask of
  /// possible domains for instruction.
  ///
  /// Some micro-architectures have multiple execution domains, and multiple
  /// opcodes that perform the same operation in different domains. For
  /// example, the x86 architecture provides the por, orps, and orpd
  /// instructions that all do the same thing. There is a latency penalty if a
  /// register is written in one domain and read in another.
  ///
  /// This function returns a pair (domain, mask) containing the execution
  /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
  /// function can be used to change the opcode to one of the domains in the
  /// bit mask. Instructions whose execution domain can't be changed should
  /// return a 0 mask.
  ///
  /// The execution domain numbers don't have any special meaning except domain
  /// 0 is used for instructions that are not associated with any interesting
  /// execution domain.
  virtual std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const {
    return std::make_pair(0, 0);
  }

  /// Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
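
  // Caller-side sketch of how these two hooks cooperate (roughly what the
  // execution-domain fixup pass does; the chosen domain is an assumption):
  //
  //   uint16_t Domain, Mask;
  //   std::tie(Domain, Mask) = TII->getExecutionDomain(MI);
  //   if ((Mask & (1u << 1)) && Domain != 1)
  //     TII->setExecutionDomain(MI, 1); // Re-encode MI into domain 1.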

  /// Returns the preferred minimum clearance
  /// before an instruction with an unwanted partial register update.
  ///
  /// Some instructions only write part of a register, and implicitly need to
  /// read the other parts of the register. This may cause unwanted stalls
  /// preventing otherwise unrelated instructions from executing in parallel in
  /// an out-of-order CPU.
  ///
  /// For example, the x86 instruction cvtsi2ss writes its result to bits
  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
  /// the instruction needs to wait for the old value of the register to become
  /// available:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
  /// instruction before it can issue, even though the high bits of %xmm0
  /// probably aren't needed.
  ///
  /// This hook returns the preferred clearance before MI, measured in
  /// instructions. Other defs of MI's operand OpNum are avoided in the last N
  /// instructions before MI. It should only return a positive value for
  /// unwanted dependencies. If the old bits of the defined register have
  /// useful values, or if MI is determined to otherwise read the dependency,
  /// the hook should return 0.
  ///
  /// The unwanted dependency may be handled by:
  ///
  /// 1. Allocating the same register for an MI def and use. That makes the
  ///    unwanted dependency identical to a required dependency.
  ///
  /// 2. Allocating a register for the def that has no defs in the previous N
  ///    instructions.
  ///
  /// 3. Calling breakPartialRegDependency() with the same arguments. This
  ///    allows the target to insert a dependency breaking instruction.
  virtual unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no partial register dependency.
    return 0;
  }

  /// Return the minimum clearance before an instruction that reads an
  /// unused register.
  ///
  /// For example, AVX instructions may copy part of a register operand into
  /// the unused high bits of the destination register.
  ///
  ///   vcvtsi2sdq %rax, undef %xmm0, %xmm14
  ///
  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
  /// false dependence on any previous write to %xmm0.
  ///
  /// This hook works similarly to getPartialRegUpdateClearance, except that it
  /// does not take an operand index. Instead, it sets \p OpNum to the index of
  /// the unused register.
  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
                                        const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no undef register dependency.
    return 0;
  }

  /// Insert a dependency-breaking instruction
  /// before MI to eliminate an unwanted dependency on OpNum.
  ///
  /// If it wasn't possible to avoid a def in the last N instructions before MI
  /// (see getPartialRegUpdateClearance), this hook will be called to break the
  /// unwanted dependency.
  ///
  /// On x86, an xorps instruction can be used as a dependency breaker:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   xorps %xmm0, %xmm0
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// An <imp-kill> operand should be added to MI if an instruction was
  /// inserted. This ties the instructions together in the post-ra scheduler.
  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const {}
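
  // Illustrative sketch of an override that inserts a zeroing idiom before MI
  // to break the dependency ("MyTarget::XORPSrr" stands in for a real
  // dependency-breaking opcode; the names are assumptions):
  //
  //   void MyTargetInstrInfo::breakPartialRegDependency(
  //       MachineInstr &MI, unsigned OpNum,
  //       const TargetRegisterInfo *TRI) const {
  //     Register Reg = MI.getOperand(OpNum).getReg();
  //     BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
  //             get(MyTarget::XORPSrr), Reg)
  //         .addReg(Reg, RegState::Undef)
  //         .addReg(Reg, RegState::Undef);
  //   }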

  /// Create machine specific model for scheduling.
  virtual DFAPacketizer *
  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
    return nullptr;
  }

  /// Sometimes, it is possible for the target
  /// to tell, even without aliasing information, that two MIs access different
  /// memory addresses. This function returns true if two MIs access different
  /// memory addresses and false otherwise.
  ///
  /// Assumes any physical registers used to compute addresses have the same
  /// value for both instructions. (This is the most useful assumption for
  /// post-RA scheduling.)
  ///
  /// See also MachineInstr::mayAlias, which is implemented on top of this
  /// function.
  virtual bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const {
    assert((MIa.mayLoad() || MIa.mayStore()) &&
           "MIa must load from or modify a memory location");
    assert((MIb.mayLoad() || MIb.mayStore()) &&
           "MIb must load from or modify a memory location");
    return false;
  }

  /// Return the value to use for the MachineCSE's LookAheadLimit,
  /// which is a heuristic used for CSE'ing phys reg defs.
  virtual unsigned getMachineCSELookAheadLimit() const {
    // The default lookahead is small to prevent unprofitable quadratic
    // behavior.
    return 5;
  }

  /// Return an array that contains the ids of the target indices (used for the
  /// TargetIndex machine operand) and their names.
  ///
  /// MIR Serialization is able to serialize only the target indices that are
  /// defined by this method.
  virtual ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const {
    return None;
  }

  /// Decompose the machine operand's target flags into two values - the direct
  /// target flag value and any bit flags that are applied.
  virtual std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
    return std::make_pair(0u, 0u);
  }
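
  // Illustrative sketch: a target that keeps the direct flag in the low bits
  // and bit flags in a high mask might decompose as follows (the masks are
  // assumptions for the example):
  //
  //   std::pair<unsigned, unsigned>
  //   MyTargetInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  //     const unsigned DirectMask = 0x0f, BitmaskMask = 0xf0;
  //     return std::make_pair(TF & DirectMask, TF & BitmaskMask);
  //   }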

  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const {
    return None;
  }
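
  // A minimal sketch of an override, returning a static table so the
  // returned ArrayRef stays valid (flag names and values are assumptions):
  //
  //   ArrayRef<std::pair<unsigned, const char *>>
  //   MyTargetInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  //     static const std::pair<unsigned, const char *> Flags[] = {
  //         {1 /*MO_LO16*/, "my-lo16"}, {2 /*MO_HI16*/, "my-hi16"}};
  //     return makeArrayRef(Flags);
  //   }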

  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const {
    return None;
  }

  /// Return an array that contains the MMO target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the MMO target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const {
    return None;
  }

  /// Determines whether \p Inst is a tail call instruction. Override this
  /// method on targets that do not properly set MCID::Return and MCID::Call on
  /// tail call instructions.
  virtual bool isTailCall(const MachineInstr &Inst) const {
    return Inst.isReturn() && Inst.isCall();
  }

  /// True if the instruction is bound to the top of its basic block and no
  /// other instructions shall be inserted before it. This can be implemented
  /// to prevent the register allocator from inserting spills before such
  /// instructions.
  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
    return false;
  }

  /// During PHI elimination lets the target make necessary checks and
  /// insert the copy to the PHI destination register in a target specific
  /// manner.
  virtual MachineInstr *createPHIDestinationCopy(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
      const DebugLoc &DL, Register Src, Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src);
  }

  /// During PHI elimination lets the target make necessary checks and
  /// insert the copy from the PHI source register in a target specific
  /// manner.
  virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator InsPt,
                                            const DebugLoc &DL, Register Src,
                                            Register SrcSubReg,
                                            Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src, 0, SrcSubReg);
  }

  /// Returns a \p outliner::OutlinedFunction struct containing target-specific
  /// information for a set of outlining candidates.
  virtual outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
  }

  /// Returns how or if \p MI should be outlined.
  virtual outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningType!");
  }

  /// Optional target hook that returns true if \p MBB is safe to outline from,
  /// and returns any target-specific information in \p Flags.
  virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                      unsigned &Flags) const {
    return true;
  }

  /// Insert a custom frame for outlined functions.
  virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                                  const outliner::OutlinedFunction &OF) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
  }

  /// Insert a call to an outlined function into the program.
  /// Returns an iterator to the spot where we inserted the call. This must be
  /// implemented by the target.
  virtual MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
  }

  /// Return true if the function can safely be outlined from.
  /// A function \p MF is considered safe for outlining if an outlined function
  /// produced from instructions in F will produce a program which produces the
  /// same output for any set of given inputs.
  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                           bool OutlineFromLinkOnceODRs) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
  }

  /// Return true if the function should be outlined from by default.
  virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
    return false;
  }

  /// Produce the expression describing the \p MI loading a value into
  /// the parameter's forwarding register.
  virtual Optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI) const;

private:
  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
  unsigned CatchRetOpcode;
  unsigned ReturnOpcode;
};

/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
  using RegInfo = DenseMapInfo<unsigned>;

  static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
                                          RegInfo::getEmptyKey());
  }

  static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
                                          RegInfo::getTombstoneKey());
  }

  /// Reuse getHashValue implementation from
  /// std::pair<unsigned, unsigned>.
  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
    std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
    return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
  }

  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
                      const TargetInstrInfo::RegSubRegPair &RHS) {
    return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
           RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
  }
};
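
// With the specialization above, RegSubRegPair can key a DenseMap directly.
// A minimal usage sketch (the map's purpose is an assumption for the example):
//
//   DenseMap<TargetInstrInfo::RegSubRegPair, unsigned> CopyCount;
//   ++CopyCount[TargetInstrInfo::RegSubRegPair(Reg, SubReg)];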

} // end namespace llvm

#endif // LLVM_TARGET_TARGETINSTRINFO_H