//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

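// Example for hasTrivialKill: given IR in a single basic block such as
//   %x = add i32 %a, %b
//   %y = mul i32 %x, %c
// the add's result has exactly one use, in the same block, and is not a
// pointer-preserving cast, so hasTrivialKill(%x) returns true and the use
// of %x in the mul may carry a kill flag. A no-op cast, by contrast,
// defers to whether its own operand is killed.
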
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end()) {
    unsigned Reg = I->second;
    return Reg;
  }
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

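// Example for getRegForValue: the first query for a constant such as
//   i32 42
// materializes it into a fresh virtual register in the local-value area;
// a second query in the same block hits LocalValueMap and returns the same
// register, so the constant is emitted at most once per block.
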
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }

  return Reg;
}

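// Example for the ConstantFP path above: a constant like double 4.0
// converts exactly to the integer 4, so when FastEmit_f fails it can still
// be materialized as an integer constant followed by a SINT_TO_FP, rather
// than requiring a constant-pool load.
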
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;
    AssignedReg = Reg;
  }

  return AssignedReg;
}

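// Example for UpdateValueMap: if a use of an instruction was selected before
// its definition and reserved %vreg1 for it, but selecting the definition
// later produced the result in %vreg2, the RegFixups entry %vreg1 -> %vreg2
// arranges for the earlier uses to be rewritten instead of inserting an
// extra copy here.
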
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

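// Example for getRegForGEPIndex: on a 64-bit target, an i32 GEP index is
// sign-extended to the 64-bit pointer width before it participates in
// address arithmetic; a wider-than-pointer index is truncated instead.
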
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill,
                                     CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

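// Example for SelectBinaryOp: for
//   %r = add i32 %a, 5
// the constant operand takes the FastEmit_ri path, folding 5 into the
// instruction's immediate field; only if no ri form succeeds does selection
// fall back to materializing both operands and using FastEmit_rr.
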
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

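// Example for SelectGetElementPtr: for
//   %p2 = getelementptr { i32, i32 }* %p, i32 0, i32 1
// the struct field index contributes a constant byte offset (4 here, from
// the struct layout), so the address is computed as a single ADD of 4 to
// %p; a variable array index instead emits a MUL by the element size
// followed by an ADD.
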
bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(FuncInfo.MBB->isLandingPad() &&
             "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (FuncInfo.MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) FuncInfo.MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Reg);

      bool ResultRegIsKill = hasTrivialKill(I);

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg, ResultRegIsKill);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

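// Example for the fallback path in SelectFNeg: negating a double on a
// target without FNEG bitcasts the value to i64, XORs it with
// 0x8000000000000000 (the sign bit), and bitcasts the result back to
// double.
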
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

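// Example for FastEmit_ri_: if the target has no XOR-with-immediate form
// for the requested type, FastEmit_ri returns 0; the immediate is then
// materialized into a register with FastEmit_i and the operation is
// retried as a register-register XOR via FastEmit_rr.
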
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                const ConstantFP *FPImm, MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, Op0IsKill, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg, /*Kill=*/true);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

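// Note on the pattern above (shared by the other FastEmitInst_* helpers):
// if the instruction description defines no explicit result operand, the
// result is taken to appear in the instruction's first implicit def, so a
// COPY from II.ImplicitDefs[0] into ResultReg is emitted afterwards.
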
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

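// Example for FastEmitZExtFromI1: an i1 value promoted into a 32-bit
// register may have arbitrary contents in bits 1-31; ANDing with the
// immediate 1 clears them, which is exactly
// FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1).
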
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}