//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
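// To make the constant-folding remark above concrete: for hypothetical IR
// such as "%y = add i32 %x, 42", fast-isel tries to encode 42 directly as
// an immediate operand of the add machine instruction (see FastEmit_ri_
// below) instead of first materializing it into a register of its own.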
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
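// A hypothetical illustration: in
//
//   %c = add i32 %a, %b
//   %d = mul i32 %c, %c      ; %c has two uses
//
// %c has no trivial kill because it is used more than once, whereas a
// value defined and consumed exactly once in the same block can safely be
// flagged as killed at that single use.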
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }

  return Reg;
}
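// A sketch of the flow above for a hypothetical "i32 7" on a 32-bit
// target: FastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 7) is tried
// first; if the target has no direct pattern, TargetMaterializeConstant
// gets a chance (e.g. a constant-pool load) before the value is given up.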
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;

    AssignedReg = Reg;
  }

  return AssignedReg;
}
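// Scenario sketch (register numbers hypothetical): a use of value %v in an
// earlier-selected block may already have created vreg1024 for it; when
// %v's defining block is selected, the result may land in vreg1056. Rather
// than rewriting uses on the spot, RegFixups records 1024 -> 1056 and the
// remapping is applied once selection is complete.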
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
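// Worked example (hypothetical target with 64-bit pointers): an i32 index
// in "getelementptr i8* %p, i32 %i" is widened with ISD::SIGN_EXTEND to 64
// bits before the address arithmetic; an i128 index would instead be
// narrowed with ISD::TRUNCATE. The returned bool says whether the
// resulting index register may be marked killed.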
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
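// The "sdiv exact" rewrite above on a concrete (hypothetical) example:
// "%r = sdiv exact i32 %x, 8" becomes "sra %x, 3". This is only safe
// because 'exact' promises there is no remainder; for a negative %x with
// a remainder, sdiv rounds toward zero while sra rounds toward negative
// infinity, so the plain sdiv case must not take this path.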
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
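// Address computation sketch for a hypothetical GEP, assuming 4-byte i32
// and 64-bit pointers:
//
//   %p = getelementptr { i32, [10 x i32] }* %s, i32 0, i32 1, i32 %i
//
// proceeds roughly as: N = %s; N = N + 4 (offset of field 1);
// IdxN = sext %i to i64; IdxN = IdxN * 4; N = N + IdxN -- each step going
// through FastEmit_ri_/FastEmit_rr exactly as in the loop above.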
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT) != TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(I);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(I, ResultReg);

    return true;
  }
  }

  // An arbitrary call. Bail.
  return false;
}
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }

  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
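// The i1 special case above, on hypothetical IR: for
// "%z = zext i1 %b to i32" on a target where i1 is not a legal type, %b is
// first cleaned up with FastEmitZExtFromI1 (an AND with 1, clearing the
// promoted register's high bits) and only then fed to the target's
// ZERO_EXTEND handling.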
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
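// Fallback sketch with concrete numbers: negating a hypothetical f64 value
// with no ISD::FNEG pattern bitcasts it to i64, XORs it with
// UINT64_C(1) << 63 (the IEEE-754 sign bit), and bitcasts back, flipping
// only the sign while leaving exponent and mantissa untouched.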
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
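// For example (hypothetical call): FastEmit_ri_(MVT::i32, ISD::MUL, X,
// /*Kill=*/true, 8, MVT::i32) becomes the shift
// FastEmit_ri(MVT::i32, MVT::i32, ISD::SHL, X, true, 3) since 8 == 1 << 3;
// only if no ri pattern exists is the immediate materialized so that
// FastEmit_rr can be used.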
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
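// E.g. if a register holds an i1 whose upper bits may be garbage, ANDing
// it with 1 keeps bit 0 and clears the rest; this is what makes the i1
// zero-extension special case in SelectCast cheap.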
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}
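// To make the deferred update concrete (hypothetical CFG): if block A
// branches to block B, and B begins with "%p = phi i32 [ 7, %A ], ...",
// the loop above materializes 7 into a register while A is being selected
// and records the (machine PHI, register) pair in PHINodesToUpdate; the
// PHI's operand list is completed later, once all of B's machine blocks
// exist.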