//===- FastISel.h - Definition of the FastISel class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the FastISel class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_FASTISEL_H
#define LLVM_CODEGEN_FASTISEL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {
class FunctionLoweringInfo;
class LoadInst;
class MachineConstantPool;
class MachineFrameInfo;
class MachineFunction;
class MachineInstr;
class MachineMemOperand;
class MachineOperand;
class MachineRegisterInfo;
class MCContext;
class MCInstrDesc;
class MCSymbol;
class TargetInstrInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
/// This is a fast-path instruction selection class that generates poor
/// code and doesn't support illegal types or non-trivial lowering, but runs
/// quickly.
class FastISel {
public:
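  // Targets provide their own subclass and return it from their
  // TargetLowering::createFastISel() hook. A hypothetical target "Foo" would
  // do roughly the following (sketch only; FooFastISel is not a real class):
  //
  //   FastISel *FooTargetLowering::createFastISel(
  //       FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  //     return new FooFastISel(FuncInfo, LibInfo);
  //   }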
  using ArgListEntry = TargetLoweringBase::ArgListEntry;
  using ArgListTy = TargetLoweringBase::ArgListTy;
  struct CallLoweringInfo {
    Type *RetTy = nullptr;
    bool RetSExt : 1;
    bool RetZExt : 1;
    bool IsVarArg : 1;
    bool IsInReg : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPatchPoint : 1;

    // IsTailCall should be modified by implementations of FastLowerCall
    // that perform tail call conversions.
    bool IsTailCall = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    const Value *Callee = nullptr;
    MCSymbol *Symbol = nullptr;
    ArgListTy Args;
    ImmutableCallSite *CS = nullptr;
    MachineInstr *Call = nullptr;
    unsigned ResultReg = 0;
    unsigned NumResultRegs = 0;

    SmallVector<Value *, 16> OutVals;
    SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
    SmallVector<Register, 16> OutRegs;
    SmallVector<ISD::InputArg, 4> Ins;
    SmallVector<Register, 4> InRegs;

    CallLoweringInfo()
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsPatchPoint(false) {}
    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
                                const Value *Target, ArgListTy &&ArgsList,
                                ImmutableCallSite &Call) {
      RetTy = ResultTy;
      Callee = Target;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn = Call.doesNotReturn();
      IsVarArg = FuncTy->isVarArg();
      IsReturnValueUsed = !Call.getInstruction()->use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      CallConv = Call.getCallingConv();
      Args = std::move(ArgsList);
      NumFixedArgs = FuncTy->getNumParams();

      CS = &Call;

      return *this;
    }
    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
                                MCSymbol *Target, ArgListTy &&ArgsList,
                                ImmutableCallSite &Call,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Callee = Call.getCalledValue();
      Symbol = Target;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn = Call.doesNotReturn();
      IsVarArg = FuncTy->isVarArg();
      IsReturnValueUsed = !Call.getInstruction()->use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      CallConv = Call.getCallingConv();
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;

      CS = &Call;

      return *this;
    }
    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
                                const Value *Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Callee = Target;
      CallConv = CC;
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
      return *this;
    }

    CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
                                CallingConv::ID CC, Type *ResultTy,
                                StringRef Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U);

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
                                MCSymbol *Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Symbol = Target;
      CallConv = CC;
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }
    ArgListTy &getArgs() { return Args; }
  };
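  // A minimal sketch (not from any in-tree target) of how a target's call
  // lowering might fill in a CallLoweringInfo and hand it to lowerCallTo();
  // the names RetTy, CalleeV, Args, and I below are hypothetical:
  //
  //   CallLoweringInfo CLI;
  //   CLI.setCallee(CallingConv::C, RetTy, CalleeV, std::move(Args))
  //      .setTailCall(false);
  //   if (!lowerCallTo(CLI))
  //     return false; // let the caller fall back to SelectionDAG
  //   updateValueMap(&I, CLI.ResultReg, CLI.NumResultRegs);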
protected:
  DenseMap<const Value *, unsigned> LocalValueMap;
  FunctionLoweringInfo &FuncInfo;
  MachineFunction *MF;
  MachineRegisterInfo &MRI;
  MachineFrameInfo &MFI;
  MachineConstantPool &MCP;
  DebugLoc DbgLoc;
  const TargetMachine &TM;
  const DataLayout &DL;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const TargetLibraryInfo *LibInfo;
  bool SkipTargetIndependentISel;
  /// The position of the last instruction for materializing constants
  /// for use in the current block. It resets to EmitStartPt when it makes
  /// sense (for example, it's usually profitable to avoid function calls
  /// between the definition and the use).
  MachineInstr *LastLocalValue;

  /// The topmost instruction in the current block that is allowed for
  /// emitting local variables. LastLocalValue resets to EmitStartPt when it
  /// makes sense (for example, on function calls).
  MachineInstr *EmitStartPt;

  /// Last local value flush point. On a subsequent flush, no local value will
  /// sink past this point.
  MachineBasicBlock::iterator LastFlushPoint;
public:
  /// Return the position of the last instruction emitted for
  /// materializing constants for use in the current block.
  MachineInstr *getLastLocalValue() { return LastLocalValue; }

  /// Update the position of the last instruction emitted for
  /// materializing constants for use in the current block.
  void setLastLocalValue(MachineInstr *I) {
    EmitStartPt = I;
    LastLocalValue = I;
  }

  /// Set the current block to which generated machine instructions will
  /// be appended.
  void startNewBlock();
  /// Flush the local value map and sink local values if possible.
  void finishBasicBlock();

  /// Return current debug location information.
  DebugLoc getCurDebugLoc() const { return DbgLoc; }
256 /// Do "fast" instruction selection for function arguments and append
257 /// the machine instructions to the current block. Returns true when
259 bool lowerArguments();
261 /// Do "fast" instruction selection for the given LLVM IR instruction
262 /// and append the generated machine instructions to the current block.
263 /// Returns true if selection was successful.
264 bool selectInstruction(const Instruction
*I
);
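  // Roughly how a fast-isel driver (SelectionDAGISel in-tree) is expected to
  // use these per-block entry points; a simplified sketch, not the exact
  // in-tree loop:
  //
  //   FastIS->startNewBlock();
  //   for (const Instruction &Inst : *LLVMBB) {
  //     if (FastIS->selectInstruction(&Inst))
  //       continue; // fast path handled it
  //     // ... otherwise hand Inst (and the rest) to SelectionDAG ...
  //   }
  //   FastIS->finishBasicBlock();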
266 /// Do "fast" instruction selection for the given LLVM IR operator
267 /// (Instruction or ConstantExpr), and append generated machine instructions
268 /// to the current block. Return true if selection was successful.
269 bool selectOperator(const User
*I
, unsigned Opcode
);
271 /// Create a virtual register and arrange for it to be assigned the
272 /// value for the given LLVM value.
273 unsigned getRegForValue(const Value
*V
);
  /// Look up the value to see if it is already cached in a
  /// register. It may be defined by instructions across blocks or defined
  /// locally.
  unsigned lookUpRegForValue(const Value *V);
  /// This is a wrapper around getRegForValue that also takes care of
  /// truncating or sign-extending the given getelementptr index value.
  std::pair<unsigned, bool> getRegForGEPIndex(const Value *Idx);
  /// We're checking to see if we can fold \p LI into \p FoldInst. Note
  /// that we could have a sequence where multiple LLVM IR instructions are
  /// folded into the same MachineInstr. For example we could have:
  ///
  ///   A: x = load i32 *P
  ///   B: y = icmp A, 42
  ///   C: br y, ...
  ///
  /// In this scenario, \p LI is "A", and \p FoldInst is "C". We know about "B"
  /// (and any other folded instructions) because it is between A and C.
  ///
  /// If we succeed folding, return true.
  bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
  /// The specified machine instr operand is a vreg, and that vreg is
  /// being provided by the specified load instruction. If possible, try to
  /// fold the load as an operand to the instruction, returning true if
  /// possible.
  ///
  /// This method should be implemented by targets.
  virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
                                   const LoadInst * /*LI*/) {
    return false;
  }

  /// Reset InsertPt to prepare for inserting instructions into the
  /// current block.
  void recomputeInsertPt();

  /// Remove all dead instructions between I and E.
  void removeDeadCode(MachineBasicBlock::iterator I,
                      MachineBasicBlock::iterator E);

  struct SavePoint {
    MachineBasicBlock::iterator InsertPt;
    DebugLoc DL;
  };
  /// Prepare InsertPt to begin inserting instructions into the local
  /// value area and return the old insert position.
  SavePoint enterLocalValueArea();

  /// Reset InsertPt to the given old insert position.
  void leaveLocalValueArea(SavePoint Old);
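  // Intended pairing for emitting into the local value area (a sketch; C is a
  // hypothetical constant being materialized):
  //
  //   SavePoint SP = enterLocalValueArea(); // redirect InsertPt to the area
  //   unsigned CstReg = fastMaterializeConstant(C);
  //   leaveLocalValueArea(SP);              // restore the old insert position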
  virtual ~FastISel();

protected:
  explicit FastISel(FunctionLoweringInfo &FuncInfo,
                    const TargetLibraryInfo *LibInfo,
                    bool SkipTargetIndependentISel = false);
  /// This method is called by target-independent code when the normal
  /// FastISel process fails to select an instruction. This gives targets a
  /// chance to emit code for anything that doesn't fit into FastISel's
  /// framework. It returns true if it was successful.
  virtual bool fastSelectInstruction(const Instruction *I) = 0;
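  // Shape of a typical target override (hypothetical target "Foo"); returning
  // false tells the common FastISel code to give up on this instruction so the
  // caller can fall back to SelectionDAG:
  //
  //   bool FooFastISel::fastSelectInstruction(const Instruction *I) {
  //     switch (I->getOpcode()) {
  //     case Instruction::Ret:  return selectFooRet(I);  // target-private helper
  //     case Instruction::Load: return selectFooLoad(I); // target-private helper
  //     default:                return false;            // not handled here
  //     }
  //   }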
  /// This method is called by target-independent code to do target-
  /// specific argument lowering. It returns true if it was successful.
  virtual bool fastLowerArguments();

  /// This method is called by target-independent code to do target-
  /// specific call lowering. It returns true if it was successful.
  virtual bool fastLowerCall(CallLoweringInfo &CLI);

  /// This method is called by target-independent code to do target-
  /// specific intrinsic lowering. It returns true if it was successful.
  virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type and opcode be emitted.
  virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register operand be emitted.
  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                              bool Op0IsKill);
  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register operands be emitted.
  virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                               bool Op0IsKill, unsigned Op1, bool Op1IsKill);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register and immediate
  /// operands be emitted.
  virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                               bool Op0IsKill, uint64_t Imm);
  /// This method is a wrapper of fastEmit_ri.
  ///
  /// It first tries to emit an instruction with an immediate operand using
  /// fastEmit_ri. If that fails, it materializes the immediate into a register
  /// and tries fastEmit_rr instead.
  unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
                        uint64_t Imm, MVT ImmType);
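  // Illustrative call from a target's binary-operator path: try an
  // add-with-immediate first; fastEmit_ri_ falls back to materializing the
  // immediate and using fastEmit_rr when the immediate form is unavailable
  // (operand names are hypothetical):
  //
  //   unsigned ResultReg = fastEmit_ri_(VT, ISD::ADD, Op0Reg, Op0IsKill,
  //                                     CI->getZExtValue(), VT);
  //   if (!ResultReg)
  //     return false;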
  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and immediate operand be emitted.
  virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and floating-point immediate
  /// operand be emitted.
  virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
                              const ConstantFP *FPImm);

  /// Emit a MachineInstr with no operands and a result register in the
  /// given register class.
  unsigned fastEmitInst_(unsigned MachineInstOpcode,
                         const TargetRegisterClass *RC);
  /// Emit a MachineInstr with one register operand and a result register
  /// in the given register class.
  unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, unsigned Op0,
                          bool Op0IsKill);
  /// Emit a MachineInstr with two register operands and a result
  /// register in the given register class.
  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC, unsigned Op0,
                           bool Op0IsKill, unsigned Op1, bool Op1IsKill);
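  // Example of the usual pattern in a target helper, with a made-up opcode and
  // register class; the result lands in a fresh virtual register of RC:
  //
  //   unsigned ResultReg =
  //       fastEmitInst_rr(Foo::ADDrr, &Foo::GPR32RegClass, LHSReg, LHSIsKill,
  //                       RHSReg, RHSIsKill);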
  /// Emit a MachineInstr with three register operands and a result
  /// register in the given register class.
  unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
                            unsigned Op2, bool Op2IsKill);

  /// Emit a MachineInstr with a register operand, an immediate, and a
  /// result register in the given register class.
  unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC, unsigned Op0,
                           bool Op0IsKill, uint64_t Imm);
  /// Emit a MachineInstr with one register operand and two immediate
  /// operands.
  unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
  /// Emit a MachineInstr with a floating point immediate, and a result
  /// register in the given register class.
  unsigned fastEmitInst_f(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          const ConstantFP *FPImm);
  /// Emit a MachineInstr with two register operands, an immediate, and a
  /// result register in the given register class.
  unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            bool Op0IsKill, unsigned Op1, bool Op1IsKill,
                            uint64_t Imm);
  /// Emit a MachineInstr with a single immediate operand, and a result
  /// register in the given register class.
  unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, uint64_t Imm);
  /// Emit a MachineInstr for an extract_subreg from a specified index of
  /// a superregister to a specified type.
  unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
                                      uint32_t Idx);
  /// Emit MachineInstrs to compute the value of Op with all but the
  /// least significant bit set to zero.
  unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);

  /// Emit an unconditional branch to the given block, unless it is the
  /// immediate (fall-through) successor, and update the CFG.
  void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc);
  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
  /// and add \p TrueMBB and \p FalseMBB to the successor list.
  void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
                        MachineBasicBlock *FalseMBB);

  /// Update the value map to include the new mapping for this
  /// instruction, or insert an extra copy to get the result in a previously
  /// determined register.
  ///
  /// NOTE: This is only necessary because we might select a block that uses a
  /// value before we select the block that defines the value. It might be
  /// possible to fix this by selecting blocks in reverse postorder.
  void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
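  // The canonical pattern in target select* routines: compute the operand
  // registers, emit the machine instruction(s), then record which virtual
  // register now holds the IR value (sketch with hypothetical names):
  //
  //   unsigned SrcReg = getRegForValue(I->getOperand(0));
  //   if (!SrcReg)
  //     return false;
  //   unsigned ResultReg = ...; // emit instructions producing the result
  //   updateValueMap(I, ResultReg);
  //   return true;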
  unsigned createResultReg(const TargetRegisterClass *RC);
  /// Try to constrain Op so that it is usable by argument OpNum of the
  /// provided MCInstrDesc. If this fails, create a new virtual register in the
  /// correct class and COPY the value there.
  unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                    unsigned OpNum);

  /// Emit a constant in a register using target-specific logic, such as
  /// constant pool loads.
  virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }

  /// Emit an alloca address in a register using target-specific logic.
  virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }

  /// Emit the floating-point constant +0.0 in a register using target-
  /// specific logic.
  virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
    return 0;
  }
  /// Check if \c Add is an add that can be safely folded into \c GEP.
  ///
  /// \c Add can be folded into \c GEP if:
  /// - \c Add is an add,
  /// - \c Add's size matches \c GEP's,
  /// - \c Add is in the same basic block as \c GEP, and
  /// - \c Add has a constant operand.
  bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
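  // For instance (an IR sketch), the add below meets all of the conditions
  // above when %sum and the GEP are in the same block and i64 matches the
  // GEP's index width:
  //
  //   %sum = add i64 %idx, 16
  //   %ptr = getelementptr i32, i32* %base, i64 %sum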
  /// Test whether the given value has exactly one use.
  bool hasTrivialKill(const Value *V);

  /// Create a machine mem operand from the given instruction.
  MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;

  CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;

  bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
  bool lowerCallTo(const CallInst *CI, const char *SymName,
                   unsigned NumArgs);
  bool lowerCallTo(CallLoweringInfo &CLI);
  bool isCommutativeIntrinsic(IntrinsicInst const *II) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
      return true;
    default:
      return false;
    }
  }
  bool lowerCall(const CallInst *I);
  /// Select and emit code for a binary operator instruction, whose opcode
  /// directly corresponds to the given ISD opcode.
  bool selectBinaryOp(const User *I, unsigned ISDOpcode);
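  // selectOperator typically reaches this by translating the IR opcode into
  // the matching ISD opcode first, e.g. (sketch):
  //
  //   case Instruction::Add:
  //     return selectBinaryOp(I, ISD::ADD);
  //   case Instruction::FAdd:
  //     return selectBinaryOp(I, ISD::FADD);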
  bool selectFNeg(const User *I, const Value *In);
  bool selectGetElementPtr(const User *I);
  bool selectStackmap(const CallInst *I);
  bool selectPatchpoint(const CallInst *I);
  bool selectCall(const User *I);
  bool selectIntrinsicCall(const IntrinsicInst *II);
  bool selectBitCast(const User *I);
  bool selectCast(const User *I, unsigned Opcode);
  bool selectExtractValue(const User *U);
  bool selectInsertValue(const User *I);
  bool selectXRayCustomEvent(const CallInst *II);
  bool selectXRayTypedEvent(const CallInst *II);
private:
  /// Handle PHI nodes in successor blocks.
  ///
  /// Emit code to ensure constants are copied into registers when needed.
  /// Remember the virtual registers that need to be added to the Machine PHI
  /// nodes as input. We cannot just directly add them, because expansion might
  /// result in multiple MBBs for one BB. As such, the start of the BB might
  /// correspond to a different MBB than the end.
  bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  /// Helper for materializeRegForValue to materialize a constant in a
  /// target-independent way.
  unsigned materializeConstant(const Value *V, MVT VT);

  /// Helper for getRegForValue. This function is called when the value
  /// isn't already available in a register and must be materialized with new
  /// instructions.
  unsigned materializeRegForValue(const Value *V, MVT VT);

  /// Clears LocalValueMap and moves the area for the new local variables
  /// to the beginning of the block. It helps to avoid spilling cached variables
  /// across heavy instructions like calls.
  void flushLocalValueMap();

  /// Removes dead local value instructions after SavedLastLocalValue.
  void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
  struct InstOrderMap {
    DenseMap<MachineInstr *, unsigned> Orders;
    MachineInstr *FirstTerminator = nullptr;
    unsigned FirstTerminatorOrder = std::numeric_limits<unsigned>::max();

    void initialize(MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator LastFlushPoint);
  };
  /// Sinks the local value materialization instruction LocalMI to its first
  /// use in the basic block, or deletes it if it is not used.
  void sinkLocalValueMaterialization(MachineInstr &LocalMI, unsigned DefReg,
                                     InstOrderMap &OrderMap);

  /// Insertion point before trying to select the current instruction.
  MachineBasicBlock::iterator SavedInsertPt;

  /// Add a stackmap or patchpoint intrinsic call's live variable
  /// operands to a stackmap or patchpoint machine instruction.
  bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                           const CallInst *CI, unsigned StartIdx);
  bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
                         const Value *Callee, bool ForceRetVoidTy,
                         CallLoweringInfo &CLI);
};
} // end namespace llvm

#endif // LLVM_CODEGEN_FASTISEL_H