lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp

//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "SelectionDAGBuild.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

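// Worked example (editorial sketch, not part of the original file): for the
// aggregate type {i32, {float, float}, i8} the leaves are numbered in
// flattened order i32=0, float=1, float=2, i8=3. So for
//
//   %v = extractvalue {i32, {float, float}, i8} %agg, 1, 1
//
// ComputeLinearIndex walks past the i32 leaf (CurIndex becomes 1), recurses
// into the inner struct, walks past its first float (CurIndex becomes 2),
// matches the second index, and returns 2.
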
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                            SmallVectorImpl<EVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets = 0,
                            uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty == Type::getVoidTy(Ty->getContext()))
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

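// Worked example (editorial sketch): assuming a target where i32 and float
// are both 4 bytes and the StructLayout adds no padding, the type
// {i32, [2 x float]} produces
//   ValueVTs = { MVT::i32, MVT::f32, MVT::f32 }
//   Offsets  = { 0, 4, 8 }
// i.e. one entry per non-aggregate leaf, in declaration order.
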
namespace llvm {
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.
/// The splitting of aggregates is performed recursively, so that we never
/// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// legal type.
///
struct VISIBILITY_HIDDEN RegsForValue {
  /// TLI - The TargetLowering object.
  ///
  const TargetLowering *TLI;

  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<EVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers it is
  /// necessary to have a separate record of the types.
  ///
  SmallVector<EVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;

  RegsForValue() : TLI(0) {}

  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               EVT regvt, EVT valuevt)
    : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<EVT, 4> &regvts,
               const SmallVector<EVT, 4> &valuevts)
    : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
  RegsForValue(LLVMContext &Context, const TargetLowering &tli,
               unsigned Reg, const Type *Ty) : TLI(&tli) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      EVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
      EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    TLI = RHS.TLI;
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
                          SDValue &Chain, SDValue *Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                     SDValue &Chain, SDValue *Flag) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Code,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
  : TLI(tli) {
}

void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               SelectionDAG &DAG,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    DebugLoc DL;
    for (BasicBlock::iterator
           I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (Function *F = CI->getCalledFunction()) {
          switch (F->getIntrinsicID()) {
          default: break;
          case Intrinsic::dbg_stoppoint: {
            DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
            if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::Default))
              DL = ExtractDebugLocation(*SPI, MF->getDebugLocInfo());
            break;
          }
          case Intrinsic::dbg_func_start: {
            DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
            if (isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::Default))
              DL = ExtractDebugLocation(*FSI, MF->getDebugLocInfo());
            break;
          }
          }
        }
      }

      PN = dyn_cast<PHINode>(I);
      if (!PN || PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }
}

unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
                                const SDValue *Parts,
                                unsigned NumParts, EVT PartVT, EVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (!ValueVT.isVector() && ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
        Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
                              PartVT, HalfVT);
      } else {
        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
      }
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, dl,
                              Parts+RoundParts, OddParts, PartVT, OddVT);

        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getPointerTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
      }
    } else if (ValueVT.isVector()) {
      // Handle a multi-element vector.
      EVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
      NumParts = NumRegs; // Silence a compiler warning.
      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
      assert(RegisterVT == Parts[0].getValueType() &&
             "Part type doesn't match part!");

      // Assemble the parts into intermediate operands.
      SmallVector<SDValue, 8> Ops(NumIntermediates);
      if (NumIntermediates == NumParts) {
        // If the register was not expanded, truncate or copy the value,
        // as appropriate.
        for (unsigned i = 0; i != NumParts; ++i)
          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
                                    PartVT, IntermediateVT);
      } else if (NumParts > 0) {
        // If the intermediate type was expanded, build the intermediate operands
        // from the parts.
        assert(NumParts % NumIntermediates == 0 &&
               "Must expand into a divisible number of parts!");
        unsigned Factor = NumParts / NumIntermediates;
        for (unsigned i = 0; i != NumIntermediates; ++i)
          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
                                    PartVT, IntermediateVT);
      }

      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
      // operands.
      Val = DAG.getNode(IntermediateVT.isVector() ?
                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
                        ValueVT, &Ops[0], NumIntermediates);
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
  }

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
  }

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    } else {
      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
    }
  }

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
  }

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");
  return SDValue();
}

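// Worked example (editorial sketch): reassembling an i64 value from two i32
// parts on a 32-bit target takes the NumParts == 2 integer path above. Both
// parts already have the i32 half type, and the result is a single node:
//   Val = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
// On a big-endian target the parts arrive most-significant first, so the
// std::swap(Lo, Hi) call reorders them into the (Lo, Hi) operand order that
// BUILD_PAIR expects.
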
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
                           SDValue *Parts, unsigned NumParts, EVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrVT = TLI.getPointerTy();
  EVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!NumParts)
    return;

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");
      Parts[0] = Val;
      return;
    }

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
      } else {
        llvm_unreachable("Unknown mismatch!");
      }
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
      } else {
        llvm_unreachable("Unknown mismatch!");
      }
    }

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

    if (NumParts == 1) {
      assert(PartVT == ValueVT && "Type conversion failed!");
      Parts[0] = Val;
      return;
    }

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2. Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getPointerTy()));
      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    }

    // The number of parts is a power of 2. Repeatedly bisect the value using
    // EXTRACT_ELEMENT.
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
                           EVT::getIntegerVT(*DAG.getContext(),
                                             ValueVT.getSizeInBits()),
                           Val);
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part0);
          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part1);
        }
      }
    }

    if (TLI.isBigEndian())
      std::reverse(Parts, Parts + OrigNumParts);

    return;
  }

  // Vector ValueVT.
  if (NumParts == 1) {
    if (PartVT != ValueVT) {
      if (PartVT.isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
      } else {
        assert(ValueVT.getVectorElementType() == PartVT &&
               ValueVT.getVectorNumElements() == 1 &&
               "Only trivial vector-to-scalar conversions should get here!");
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                          PartVT, Val,
                          DAG.getConstant(0, PtrVT));
      }
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                              IntermediateVT, NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           PtrVT));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
  }
}

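// Worked example (editorial sketch): splitting an i64 value into two i32
// parts takes the power-of-2 bisection path above. The single step emits
//   Part1 = EXTRACT_ELEMENT(Val, 1)   // upper 32 bits
//   Part0 = EXTRACT_ELEMENT(Val, 0)   // lower 32 bits
// and on a big-endian target the final std::reverse puts the most
// significant part first, mirroring what getCopyFromParts expects when it
// reassembles the value.
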
void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  AA = &aa;
  GFI = gfi;
  TD = DAG.getTarget().getTargetData();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  NodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  DAG.clear();
  CurDebugLoc = DebugLoc::getUnknownLoc();
  HasTailCall = false;
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGLowering::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

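// Editorial note: two loads lowered in the same block each append their
// output chain to PendingLoads instead of serializing on DAG.getRoot(), so
// they remain independently schedulable. A later store calls getRoot(),
// which (when more than one load is pending) merges the chains with a
// single TokenFactor node: the store is then ordered after all the loads
// without the loads being ordered against each other.
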
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGLowering::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     &PendingExports[0],
                     PendingExports.size());
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);
}

void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
  }
}

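// Editorial sketch of what the HANDLE_INST expansion above produces; for
// example, the Add entry in llvm/Instruction.def expands (approximately) to:
//
//   case Instruction::Add: return visitAdd((BinaryOperator&)I);
//
// so each IR opcode dispatches to the corresponding visit* member function
// without a hand-written case per instruction.
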
SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    EVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(*CI, VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return N = DAG.getUNDEF(VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(&Constants[0], Constants.size(),
                                getCurDebugLoc());
    }

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }
      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
    }

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT = TLI.getValueType(VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
                                    VT, &Ops[0], Ops.size());
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
}

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0) continue;

    SDValue RetOp = getValue(I.getOperand(i));
    for (unsigned j = 0, f = NumValues; j != f; ++j) {
      EVT VT = ValueVTs[j];

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      const Function *F = I.getParent()->getParent();
      if (F->paramHasAttr(0, Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->paramHasAttr(0, Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling
      // conventions. The frontend should mark functions whose return values
      // require promoting with signext or zeroext attributes.
      if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
        EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
      }

      unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
      EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      getCopyToParts(DAG, getCurDebugLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                     &Parts[0], NumParts, PartVT, ExtendKind);

      // 'inreg' on function refers to return value
      ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))
        Flags.setInReg();

      // Propagate extension type if any
      if (F->paramHasAttr(0, Attribute::SExt))
        Flags.setSExt();
      else if (F->paramHasAttr(0, Attribute::ZExt))
        Flags.setZExt();

      for (unsigned i = 0; i < NumParts; ++i)
        Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
    }
  }

  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction()->getCallingConv();
  Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
                          Outs, getCurDebugLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
  if (!V->use_empty()) {
    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}

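// Editorial example: FCMP_ULT normally maps to the unordered SETULT, which
// is true when either operand is NaN; when FiniteOnlyFPMath() is set, the
// function returns the plain SETLT variant instead, since NaN operands are
// assumed absent and the ordered/unordered distinction collapses.
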
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
                                                   MachineBasicBlock *TBB,
                                                   MachineBasicBlock *FBB,
                                                   MachineBasicBlock *CurBB) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == CurMBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
      } else {
        Condition = ISD::SETEQ; // silence warning.
        llvm_unreachable("Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
               NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);
}

/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// recursively lower it as a chain of conditional branches, creating an
/// intermediate block for the right-hand operand as needed.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}

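// Editorial example: a source condition like (A < B || A == B) produces two
// CaseBlocks comparing the same operand pair; as the comment above notes,
// the two setcc's later fold into a single comparison (effectively A <= B),
// so ShouldEmitAsBranches returns false and the condition is emitted with
// and/or nodes rather than as two separate branch blocks.
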
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        FuncInfo.MF->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  DebugLoc dl = getCurDebugLoc();

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);
    }
  }

  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded, fix up the CFG.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
  } else {
    // Otherwise, go ahead and insert the false branch.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
    else
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
  }
}

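// Editorial note on the range check above: for a case range Low <= X <= High
// (with Low not the minimum signed value), the emitted pattern is the
// classic unsigned-compare trick:
//   SUB  = X - Low
//   Cond = setcc(SUB, High - Low, SETULE)
// One unsigned comparison covers both bounds, because any X < Low wraps
// around to a large unsigned value that fails the SETULE test.
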
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  EVT PTy = TLI.getPointerTy();
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
                          MVT::Other, Index.getValue(1),
                          Table, Index));
}

/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
                                                JumpTableHeader &JTH) {
  // Subtract the lowest switch case value from the value being switched on and
  // conditional branch to default mbb if the result is greater than the
  // difference between smallest and largest cases.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(JTH.First, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  if (VT.bitsGT(TLI.getPointerTy()))
    SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
  else
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);

  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the largest
  // case in the switch.
  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
                             DAG.getConstant(JTH.Last-JTH.First,VT),
                             ISD::SETUGT);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;

  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                               MVT::Other, CopyTo, CMP,
                               DAG.getBasicBlock(JT.Default));

  if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
}

1469 /// visitBitTestHeader - This function emits necessary code to produce value
1470 /// suitable for "bit tests"
1471 void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
1472 // Subtract the minimum value
1473 SDValue SwitchOp = getValue(B.SValue);
1474 EVT VT = SwitchOp.getValueType();
1475 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1476 DAG.getConstant(B.First, VT));
1478 // Check range
1479 SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1480 TLI.getSetCCResultType(SUB.getValueType()),
1481 SUB, DAG.getConstant(B.Range, VT),
1482 ISD::SETUGT);
1484 SDValue ShiftOp;
1485 if (VT.bitsGT(TLI.getPointerTy()))
1486 ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
1487 TLI.getPointerTy(), SUB);
1488 else
1489 ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
1490 TLI.getPointerTy(), SUB);
1492 B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1493 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1494 B.Reg, ShiftOp);
1496 // Set NextBlock to be the MBB immediately after the current one, if any.
1497 // This is used to avoid emitting unnecessary branches to the next block.
1498 MachineBasicBlock *NextBlock = 0;
1499 MachineFunction::iterator BBI = CurMBB;
1500 if (++BBI != FuncInfo.MF->end())
1501 NextBlock = BBI;
1503 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1505 CurMBB->addSuccessor(B.Default);
1506 CurMBB->addSuccessor(MBB);
1508 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1509 MVT::Other, CopyTo, RangeCmp,
1510 DAG.getBasicBlock(B.Default));
1512 if (MBB == NextBlock)
1513 DAG.setRoot(BrRange);
1514 else
1515 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrRange,
1516 DAG.getBasicBlock(MBB)));
1519 /// visitBitTestCase - This function produces one "bit test".
1520 void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
1521 unsigned Reg,
1522 BitTestCase &B) {
1523 // Make desired shift
1524 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1525 TLI.getPointerTy());
1526 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1527 TLI.getPointerTy(),
1528 DAG.getConstant(1, TLI.getPointerTy()),
1529 ShiftOp);
1531 // Emit bit tests and jumps
1532 SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1533 TLI.getPointerTy(), SwitchVal,
1534 DAG.getConstant(B.Mask, TLI.getPointerTy()));
1535 SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1536 TLI.getSetCCResultType(AndOp.getValueType()),
1537 AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1538 ISD::SETNE);
1540 CurMBB->addSuccessor(B.TargetBB);
1541 CurMBB->addSuccessor(NextMBB);
1543 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1544 MVT::Other, getControlRoot(),
1545 AndCmp, DAG.getBasicBlock(B.TargetBB));
1547 // Set NextBlock to be the MBB immediately after the current one, if any.
1548 // This is used to avoid emitting unnecessary branches to the next block.
1549 MachineBasicBlock *NextBlock = 0;
1550 MachineFunction::iterator BBI = CurMBB;
1551 if (++BBI != FuncInfo.MF->end())
1552 NextBlock = BBI;
1554 if (NextMBB == NextBlock)
1555 DAG.setRoot(BrAnd);
1556 else
1557 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1558 DAG.getBasicBlock(NextMBB)));
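// A bit test for a hypothetical Mask of 0b10110 amounts to:
//   if ((1 << (x - lowBound)) & 0b10110) goto TargetBB; else goto NextMBB;
// resolving cases 1, 2 and 4 (relative to lowBound) with a single AND.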
1561 void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
1562 // Retrieve successors.
1563 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1564 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1566 const Value *Callee(I.getCalledValue());
1567 if (isa<InlineAsm>(Callee))
1568 visitInlineAsm(&I);
1569 else
1570 LowerCallTo(&I, getValue(Callee), false, LandingPad);
1572 // If the value of the invoke is used outside of its defining block, make it
1573 // available as a virtual register.
1574 CopyToExportRegsIfNeeded(&I);
1576 // Update successor info
1577 CurMBB->addSuccessor(Return);
1578 CurMBB->addSuccessor(LandingPad);
1580 // Drop into normal successor.
1581 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1582 MVT::Other, getControlRoot(),
1583 DAG.getBasicBlock(Return)));
1586 void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
1589 /// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1590 /// small case ranges).
1591 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1592 CaseRecVector& WorkList,
1593 Value* SV,
1594 MachineBasicBlock* Default) {
1595 Case& BackCase = *(CR.Range.second-1);
1597 // Size is the number of Cases represented by this range.
1598 size_t Size = CR.Range.second - CR.Range.first;
1599 if (Size > 3)
1600 return false;
1602 // Get the MachineFunction which holds the current MBB. This is used when
1603 // inserting any additional MBBs necessary to represent the switch.
1604 MachineFunction *CurMF = FuncInfo.MF;
1606 // Figure out which block is immediately after the current one.
1607 MachineBasicBlock *NextBlock = 0;
1608 MachineFunction::iterator BBI = CR.CaseBB;
1610 if (++BBI != FuncInfo.MF->end())
1611 NextBlock = BBI;
1613 // TODO: If any two of the cases have the same destination, and if one value
1614 // is the same as the other, but has one bit unset that the other has set,
1615 // use bit manipulation to do two compares at once. For example:
1616 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1618 // Rearrange the case blocks so that the last one falls through if possible.
1619 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1620 // The last case block won't fall through into 'NextBlock' if we emit the
1621 // branches in this order. See if rearranging a case value would help.
1622 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1623 if (I->BB == NextBlock) {
1624 std::swap(*I, BackCase);
1625 break;
1630 // Create a CaseBlock record representing a conditional branch to
1631 // the Case's target mbb if the value being switched on SV is equal
1632 // to C.
1633 MachineBasicBlock *CurBlock = CR.CaseBB;
1634 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1635 MachineBasicBlock *FallThrough;
1636 if (I != E-1) {
1637 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1638 CurMF->insert(BBI, FallThrough);
1640 // Put SV in a virtual register to make it available from the new blocks.
1641 ExportFromCurrentBlock(SV);
1642 } else {
1643 // If the last case doesn't match, go to the default block.
1644 FallThrough = Default;
1647 Value *RHS, *LHS, *MHS;
1648 ISD::CondCode CC;
1649 if (I->High == I->Low) {
1650 // This is just a small case range containing exactly 1 case.
1651 CC = ISD::SETEQ;
1652 LHS = SV; RHS = I->High; MHS = NULL;
1653 } else {
1654 CC = ISD::SETLE;
1655 LHS = I->Low; MHS = SV; RHS = I->High;
1657 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1659 // If emitting the first comparison, just call visitSwitchCase to emit the
1660 // code into the current block. Otherwise, push the CaseBlock onto the
1661 // vector to be later processed by SDISel, and insert the node's MBB
1662 // before the next MBB.
1663 if (CurBlock == CurMBB)
1664 visitSwitchCase(CB);
1665 else
1666 SwitchCases.push_back(CB);
1668 CurBlock = FallThrough;
1671 return true;
1674 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1675 return !DisableJumpTables &&
1676 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1677 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1680 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1681 APInt LastExt(Last), FirstExt(First);
1682 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1683 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1684 return (LastExt - FirstExt + 1ULL);
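// For example, with 8-bit case values First = -128 and Last = 127, the true
// range 127 - (-128) + 1 = 256 does not fit in 8 bits; sign-extending both
// operands to 9 bits first keeps the subtraction from wrapping.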
1687 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
1688 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1689 CaseRecVector& WorkList,
1690 Value* SV,
1691 MachineBasicBlock* Default) {
1692 Case& FrontCase = *CR.Range.first;
1693 Case& BackCase = *(CR.Range.second-1);
1695 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1696 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1698 size_t TSize = 0;
1699 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1700 I!=E; ++I)
1701 TSize += I->size();
1703 if (!areJTsAllowed(TLI) || TSize <= 3)
1704 return false;
1706 APInt Range = ComputeRange(First, Last);
1707 double Density = (double)TSize / Range.roundToDouble();
1708 if (Density < 0.4)
1709 return false;
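// E.g. (hypothetical counts): 5 cases spanning the values 0..9 give a
// density of 5/10 = 0.5 and are table-lowered; the same 5 cases spread
// over 0..99 give 0.05 and fall through to the other lowering strategies.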
1711 DEBUG(errs() << "Lowering jump table\n"
1712 << "First entry: " << First << ". Last entry: " << Last << '\n'
1713 << "Range: " << Range
1714 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1716 // Get the MachineFunction which holds the current MBB. This is used when
1717 // inserting any additional MBBs necessary to represent the switch.
1718 MachineFunction *CurMF = FuncInfo.MF;
1720 // Figure out which block is immediately after the current one.
1721 MachineFunction::iterator BBI = CR.CaseBB;
1722 ++BBI;
1724 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1726 // Create a new basic block to hold the code for loading the address
1727 // of the jump table, and jumping to it. Update successor information;
1728 // we will either branch to the default case for the switch, or the jump
1729 // table.
1730 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1731 CurMF->insert(BBI, JumpTableBB);
1732 CR.CaseBB->addSuccessor(Default);
1733 CR.CaseBB->addSuccessor(JumpTableBB);
1735 // Build a vector of destination BBs, corresponding to each target
1736 // of the jump table. If the value of the jump table slot corresponds to
1737 // a case statement, push the case's BB onto the vector; otherwise, push
1738 // the default BB.
1739 std::vector<MachineBasicBlock*> DestBBs;
1740 APInt TEI = First;
1741 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1742 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1743 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1745 if (Low.sle(TEI) && TEI.sle(High)) {
1746 DestBBs.push_back(I->BB);
1747 if (TEI==High)
1748 ++I;
1749 } else {
1750 DestBBs.push_back(Default);
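// E.g. (hypothetical): cases {1 -> A, 3 -> B, 4 -> B} with First = 1 and
// Last = 4 yield DestBBs = [A, Default, B, B]; slot 2 matches no case, so
// that jump-table entry routes to the default block.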
1754 // Update successor info. Add one edge to each unique successor.
1755 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1756 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1757 E = DestBBs.end(); I != E; ++I) {
1758 if (!SuccsHandled[(*I)->getNumber()]) {
1759 SuccsHandled[(*I)->getNumber()] = true;
1760 JumpTableBB->addSuccessor(*I);
1764 // Create a jump table index for this jump table, or return an existing
1765 // one.
1766 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1768 // Set the jump table information so that we can codegen it as a second
1769 // MachineBasicBlock
1770 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1771 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1772 if (CR.CaseBB == CurMBB)
1773 visitJumpTableHeader(JT, JTH);
1775 JTCases.push_back(JumpTableBlock(JTH, JT));
1777 return true;
1780 /// handleBTSplitSwitchCase - Emit a comparison and split the binary search
1781 /// tree into 2 subtrees.
1782 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1783 CaseRecVector& WorkList,
1784 Value* SV,
1785 MachineBasicBlock* Default) {
1786 // Get the MachineFunction which holds the current MBB. This is used when
1787 // inserting any additional MBBs necessary to represent the switch.
1788 MachineFunction *CurMF = FuncInfo.MF;
1790 // Figure out which block is immediately after the current one.
1791 MachineFunction::iterator BBI = CR.CaseBB;
1792 ++BBI;
1794 Case& FrontCase = *CR.Range.first;
1795 Case& BackCase = *(CR.Range.second-1);
1796 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1798 // Size is the number of Cases represented by this range.
1799 unsigned Size = CR.Range.second - CR.Range.first;
1801 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1802 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1803 double FMetric = 0;
1804 CaseItr Pivot = CR.Range.first + Size/2;
1806 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
1807 // (heuristically) allow us to emit jump tables later.
1808 size_t TSize = 0;
1809 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1810 I!=E; ++I)
1811 TSize += I->size();
1813 size_t LSize = FrontCase.size();
1814 size_t RSize = TSize-LSize;
1815 DEBUG(errs() << "Selecting best pivot: \n"
1816 << "First: " << First << ", Last: " << Last <<'\n'
1817 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1818 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1819 J!=E; ++I, ++J) {
1820 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1821 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1822 APInt Range = ComputeRange(LEnd, RBegin);
1823 assert((Range - 2ULL).isNonNegative() &&
1824 "Invalid case distance");
1825 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1826 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1827 double Metric = Range.logBase2()*(LDensity+RDensity);
1828 // Should always split in some non-trivial place
1829 DEBUG(errs() <<"=>Step\n"
1830 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1831 << "LDensity: " << LDensity
1832 << ", RDensity: " << RDensity << '\n'
1833 << "Metric: " << Metric << '\n');
1834 if (FMetric < Metric) {
1835 Pivot = J;
1836 FMetric = Metric;
1837 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1840 LSize += J->size();
1841 RSize -= J->size();
1843 if (areJTsAllowed(TLI)) {
1844 // If our case is dense we *really* should handle it earlier!
1845 assert((FMetric > 0) && "Should handle dense range earlier!");
1846 } else {
1847 Pivot = CR.Range.first + Size/2;
1850 CaseRange LHSR(CR.Range.first, Pivot);
1851 CaseRange RHSR(Pivot, CR.Range.second);
1852 Constant *C = Pivot->Low;
1853 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1855 // We know that we branch to the LHS if the Value being switched on is
1856 // less than the Pivot value, C. We use this to optimize our binary
1857 // tree a bit, by recognizing that if SV is greater than or equal to the
1858 // LHS's Case Value, and that Case Value is exactly one less than the
1859 // Pivot's Value, then we can branch directly to the LHS's Target,
1860 // rather than creating a leaf node for it.
1861 if ((LHSR.second - LHSR.first) == 1 &&
1862 LHSR.first->High == CR.GE &&
1863 cast<ConstantInt>(C)->getValue() ==
1864 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1865 TrueBB = LHSR.first->BB;
1866 } else {
1867 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1868 CurMF->insert(BBI, TrueBB);
1869 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1871 // Put SV in a virtual register to make it available from the new blocks.
1872 ExportFromCurrentBlock(SV);
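// Concretely (hypothetical bounds): if SV is already known to be >= 4
// (CR.GE == 4), the lone LHS case ends at 4, and the pivot C is 5, then
// the SV < 5 branch pins SV to exactly 4, so we can branch straight to
// that case's block without an extra leaf compare.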
1875 // Similar to the optimization above, if the Value being switched on is
1876 // known to be less than the Constant CR.LT, and the current Case Value
1877 // is CR.LT - 1, then we can branch directly to the target block for
1878 // the current Case Value, rather than emitting a RHS leaf node for it.
1879 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1880 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1881 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1882 FalseBB = RHSR.first->BB;
1883 } else {
1884 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1885 CurMF->insert(BBI, FalseBB);
1886 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1888 // Put SV in a virtual register to make it available from the new blocks.
1889 ExportFromCurrentBlock(SV);
1892 // Create a CaseBlock record representing a conditional branch to
1893 // the LHS node if the value being switched on SV is less than C.
1894 // Otherwise, branch to RHS.
1895 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1897 if (CR.CaseBB == CurMBB)
1898 visitSwitchCase(CB);
1899 else
1900 SwitchCases.push_back(CB);
1902 return true;
1905 /// handleBitTestsSwitchCase - If the current case range has few destinations
1906 /// and spans less than the machine word bitwidth, encode the case range into
1907 /// a series of masks and emit bit tests with these masks.
1908 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1909 CaseRecVector& WorkList,
1910 Value* SV,
1911 MachineBasicBlock* Default){
1912 EVT PTy = TLI.getPointerTy();
1913 unsigned IntPtrBits = PTy.getSizeInBits();
1915 Case& FrontCase = *CR.Range.first;
1916 Case& BackCase = *(CR.Range.second-1);
1918 // Get the MachineFunction which holds the current MBB. This is used when
1919 // inserting any additional MBBs necessary to represent the switch.
1920 MachineFunction *CurMF = FuncInfo.MF;
1922 // If the target does not have a legal shift left, do not emit bit tests at all.
1923 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1924 return false;
1926 size_t numCmps = 0;
1927 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1928 I!=E; ++I) {
1929 // A single case counts as one compare, a case range as two.
1930 numCmps += (I->Low == I->High ? 1 : 2);
1933 // Count unique destinations
1934 SmallSet<MachineBasicBlock*, 4> Dests;
1935 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1936 Dests.insert(I->BB);
1937 if (Dests.size() > 3)
1938 // Don't bother with the code below if there are too many unique destinations.
1939 return false;
1941 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1942 << "Total number of comparisons: " << numCmps << '\n');
1944 // Compute span of values.
1945 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1946 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1947 APInt cmpRange = maxValue - minValue;
1949 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1950 << "Low bound: " << minValue << '\n'
1951 << "High bound: " << maxValue << '\n');
1953 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1954 (!(Dests.size() == 1 && numCmps >= 3) &&
1955 !(Dests.size() == 2 && numCmps >= 5) &&
1956 !(Dests.size() >= 3 && numCmps >= 6)))
1957 return false;
1959 DEBUG(errs() << "Emitting bit tests\n");
1960 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1962 // Handle the case where all the case values fit in a
1963 // word without having to subtract minValue. In this case,
1964 // we can optimize away the subtraction.
1965 if (minValue.isNonNegative() &&
1966 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1967 cmpRange = maxValue;
1968 } else {
1969 lowBound = minValue;
1972 CaseBitsVector CasesBits;
1973 unsigned i, count = 0;
1975 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1976 MachineBasicBlock* Dest = I->BB;
1977 for (i = 0; i < count; ++i)
1978 if (Dest == CasesBits[i].BB)
1979 break;
1981 if (i == count) {
1982 assert((count < 3) && "Too much destinations to test!");
1983 CasesBits.push_back(CaseBits(0, Dest, 0));
1984 count++;
1987 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1988 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1990 uint64_t lo = (lowValue - lowBound).getZExtValue();
1991 uint64_t hi = (highValue - lowBound).getZExtValue();
1993 for (uint64_t j = lo; j <= hi; j++) {
1994 CasesBits[i].Mask |= 1ULL << j;
1995 CasesBits[i].Bits++;
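// E.g. (hypothetical): with lowBound == 0, cases {0, 2, 4} -> BB_A
// accumulate Mask = 0b10101 and Bits = 3 for BB_A, to be tested later as:
//   if ((1 << x) & 0b10101) goto BB_A;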
1999 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2001 BitTestInfo BTC;
2003 // Figure out which block is immediately after the current one.
2004 MachineFunction::iterator BBI = CR.CaseBB;
2005 ++BBI;
2007 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2009 DEBUG(errs() << "Cases:\n");
2010 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2011 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2012 << ", Bits: " << CasesBits[i].Bits
2013 << ", BB: " << CasesBits[i].BB << '\n');
2015 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2016 CurMF->insert(BBI, CaseBB);
2017 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2018 CaseBB,
2019 CasesBits[i].BB));
2021 // Put SV in a virtual register to make it available from the new blocks.
2022 ExportFromCurrentBlock(SV);
2025 BitTestBlock BTB(lowBound, cmpRange, SV,
2026 -1U, (CR.CaseBB == CurMBB),
2027 CR.CaseBB, Default, BTC);
2029 if (CR.CaseBB == CurMBB)
2030 visitBitTestHeader(BTB);
2032 BitTestCases.push_back(BTB);
2034 return true;
2038 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2039 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2040 const SwitchInst& SI) {
2041 size_t numCmps = 0;
2043 // Start with "simple" cases
2044 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2045 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2046 Cases.push_back(Case(SI.getSuccessorValue(i),
2047 SI.getSuccessorValue(i),
2048 SMBB));
2050 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2052 // Merge cases into clusters.
2053 if (Cases.size() >= 2)
2054 // Must recompute end() each iteration because it may be
2055 // invalidated by erase if we hold on to it
2056 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2057 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2058 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2059 MachineBasicBlock* nextBB = J->BB;
2060 MachineBasicBlock* currentBB = I->BB;
2062 // If the two neighboring cases go to the same destination, merge them
2063 // into a single case.
2064 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2065 I->High = J->High;
2066 J = Cases.erase(J);
2067 } else {
2068 I = J++;
2072 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2073 if (I->Low != I->High)
2074 // A range counts double, since it requires two compares.
2075 ++numCmps;
2078 return numCmps;
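// E.g. (hypothetical input): cases 1 -> A, 2 -> A, 3 -> A, 5 -> B cluster
// into [1..3] -> A and [5] -> B, and Clusterify returns numCmps == 3:
// two compares for the range plus one for the singleton.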
2081 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2082 // Figure out which block is immediately after the current one.
2083 MachineBasicBlock *NextBlock = 0;
2085 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2087 // If there is only the default destination, branch to it if it is not the
2088 // next basic block. Otherwise, just fall through.
2089 if (SI.getNumOperands() == 2) {
2090 // Update machine-CFG edges.
2092 // If this is not a fall-through branch, emit the branch.
2093 CurMBB->addSuccessor(Default);
2094 if (Default != NextBlock)
2095 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2096 MVT::Other, getControlRoot(),
2097 DAG.getBasicBlock(Default)));
2098 return;
2101 // If there are any non-default case statements, create a vector of Cases
2102 // representing each one, and sort the vector so that we can efficiently
2103 // create a binary search tree from them.
2104 CaseVector Cases;
2105 size_t numCmps = Clusterify(Cases, SI);
2106 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2107 << ". Total compares: " << numCmps << '\n');
2108 numCmps = 0;
2110 // Get the Value to be switched on and default basic blocks, which will be
2111 // inserted into CaseBlock records, representing basic blocks in the binary
2112 // search tree.
2113 Value *SV = SI.getOperand(0);
2115 // Push the initial CaseRec onto the worklist
2116 CaseRecVector WorkList;
2117 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2119 while (!WorkList.empty()) {
2120 // Grab a record representing a case range to process off the worklist
2121 CaseRec CR = WorkList.back();
2122 WorkList.pop_back();
2124 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2125 continue;
2127 // If the range has few cases (three or fewer) emit a series of specific
2128 // tests.
2129 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2130 continue;
2132 // If the switch has more than 5 blocks, and at least 40% dense, and the
2133 // target supports indirect branches, then emit a jump table rather than
2134 // lowering the switch to a binary tree of conditional branches.
2135 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2136 continue;
2138 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2139 // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2140 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2145 void SelectionDAGLowering::visitFSub(User &I) {
2146 // -0.0 - X --> fneg
2147 const Type *Ty = I.getType();
2148 if (isa<VectorType>(Ty)) {
2149 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2150 const VectorType *DestTy = cast<VectorType>(I.getType());
2151 const Type *ElTy = DestTy->getElementType();
2152 unsigned VL = DestTy->getNumElements();
2153 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2154 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2155 if (CV == CNZ) {
2156 SDValue Op2 = getValue(I.getOperand(1));
2157 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2158 Op2.getValueType(), Op2));
2159 return;
2163 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2164 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2165 SDValue Op2 = getValue(I.getOperand(1));
2166 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2167 Op2.getValueType(), Op2));
2168 return;
2171 visitBinary(I, ISD::FSUB);
2174 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2175 SDValue Op1 = getValue(I.getOperand(0));
2176 SDValue Op2 = getValue(I.getOperand(1));
2178 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2179 Op1.getValueType(), Op1, Op2));
2182 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2183 SDValue Op1 = getValue(I.getOperand(0));
2184 SDValue Op2 = getValue(I.getOperand(1));
2185 if (!isa<VectorType>(I.getType()) &&
2186 Op2.getValueType() != TLI.getShiftAmountTy()) {
2187 // If the operand is smaller than the shift count type, promote it.
2188 EVT PTy = TLI.getPointerTy();
2189 EVT STy = TLI.getShiftAmountTy();
2190 if (STy.bitsGT(Op2.getValueType()))
2191 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2192 TLI.getShiftAmountTy(), Op2);
2193 // If the operand is larger than the shift count type but the shift
2194 // count type has enough bits to represent any shift value, truncate
2195 // it now. This is a common case and it exposes the truncate to
2196 // optimization early.
2197 else if (STy.getSizeInBits() >=
2198 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2199 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2200 TLI.getShiftAmountTy(), Op2);
2201 // Otherwise we'll need to temporarily settle for some other
2202 // convenient type; type legalization will make adjustments as
2203 // needed.
2204 else if (PTy.bitsLT(Op2.getValueType()))
2205 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2206 TLI.getPointerTy(), Op2);
2207 else if (PTy.bitsGT(Op2.getValueType()))
2208 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2209 TLI.getPointerTy(), Op2);
2212 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2213 Op1.getValueType(), Op1, Op2));
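// Example (assumed types): an i64 shift amount on a target whose shift
// count type is i8 is truncated here, because 8 >= Log2_32_Ceil(64) == 6,
// i.e. i8 still has enough bits to represent any meaningful shift count.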
2216 void SelectionDAGLowering::visitICmp(User &I) {
2217 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2218 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2219 predicate = IC->getPredicate();
2220 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2221 predicate = ICmpInst::Predicate(IC->getPredicate());
2222 SDValue Op1 = getValue(I.getOperand(0));
2223 SDValue Op2 = getValue(I.getOperand(1));
2224 ISD::CondCode Opcode = getICmpCondCode(predicate);
2226 EVT DestVT = TLI.getValueType(I.getType());
2227 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2230 void SelectionDAGLowering::visitFCmp(User &I) {
2231 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2232 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2233 predicate = FC->getPredicate();
2234 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2235 predicate = FCmpInst::Predicate(FC->getPredicate());
2236 SDValue Op1 = getValue(I.getOperand(0));
2237 SDValue Op2 = getValue(I.getOperand(1));
2238 ISD::CondCode Condition = getFCmpCondCode(predicate);
2239 EVT DestVT = TLI.getValueType(I.getType());
2240 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2243 void SelectionDAGLowering::visitSelect(User &I) {
2244 SmallVector<EVT, 4> ValueVTs;
2245 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2246 unsigned NumValues = ValueVTs.size();
2247 if (NumValues != 0) {
2248 SmallVector<SDValue, 4> Values(NumValues);
2249 SDValue Cond = getValue(I.getOperand(0));
2250 SDValue TrueVal = getValue(I.getOperand(1));
2251 SDValue FalseVal = getValue(I.getOperand(2));
2253 for (unsigned i = 0; i != NumValues; ++i)
2254 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2255 TrueVal.getValueType(), Cond,
2256 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2257 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2259 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2260 DAG.getVTList(&ValueVTs[0], NumValues),
2261 &Values[0], NumValues));
2266 void SelectionDAGLowering::visitTrunc(User &I) {
2267 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2268 SDValue N = getValue(I.getOperand(0));
2269 EVT DestVT = TLI.getValueType(I.getType());
2270 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2273 void SelectionDAGLowering::visitZExt(User &I) {
2274 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2275 // ZExt also can't be a cast to bool for the same reason, so there is nothing much to do.
2276 SDValue N = getValue(I.getOperand(0));
2277 EVT DestVT = TLI.getValueType(I.getType());
2278 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2281 void SelectionDAGLowering::visitSExt(User &I) {
2282 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2283 // SExt also can't be a cast to bool for the same reason, so there is nothing much to do.
2284 SDValue N = getValue(I.getOperand(0));
2285 EVT DestVT = TLI.getValueType(I.getType());
2286 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2289 void SelectionDAGLowering::visitFPTrunc(User &I) {
2290 // FPTrunc is never a no-op cast, no need to check
2291 SDValue N = getValue(I.getOperand(0));
2292 EVT DestVT = TLI.getValueType(I.getType());
2293 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2294 DestVT, N, DAG.getIntPtrConstant(0)));
2297 void SelectionDAGLowering::visitFPExt(User &I){
2298 // FPExt is never a no-op cast, no need to check
2299 SDValue N = getValue(I.getOperand(0));
2300 EVT DestVT = TLI.getValueType(I.getType());
2301 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2304 void SelectionDAGLowering::visitFPToUI(User &I) {
2305 // FPToUI is never a no-op cast, no need to check
2306 SDValue N = getValue(I.getOperand(0));
2307 EVT DestVT = TLI.getValueType(I.getType());
2308 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2311 void SelectionDAGLowering::visitFPToSI(User &I) {
2312 // FPToSI is never a no-op cast, no need to check
2313 SDValue N = getValue(I.getOperand(0));
2314 EVT DestVT = TLI.getValueType(I.getType());
2315 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2318 void SelectionDAGLowering::visitUIToFP(User &I) {
2319 // UIToFP is never a no-op cast, no need to check
2320 SDValue N = getValue(I.getOperand(0));
2321 EVT DestVT = TLI.getValueType(I.getType());
2322 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2325 void SelectionDAGLowering::visitSIToFP(User &I){
2326 // SIToFP is never a no-op cast, no need to check
2327 SDValue N = getValue(I.getOperand(0));
2328 EVT DestVT = TLI.getValueType(I.getType());
2329 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2332 void SelectionDAGLowering::visitPtrToInt(User &I) {
2333 // What to do depends on the size of the integer and the size of the pointer.
2334 // We can either truncate, zero extend, or no-op, accordingly.
2335 SDValue N = getValue(I.getOperand(0));
2336 EVT SrcVT = N.getValueType();
2337 EVT DestVT = TLI.getValueType(I.getType());
2338 SDValue Result;
2339 if (DestVT.bitsLT(SrcVT))
2340 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2341 else
2342 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2343 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2344 setValue(&I, Result);
2347 void SelectionDAGLowering::visitIntToPtr(User &I) {
2348 // What to do depends on the size of the integer and the size of the pointer.
2349 // We can either truncate, zero extend, or no-op, accordingly.
2350 SDValue N = getValue(I.getOperand(0));
2351 EVT SrcVT = N.getValueType();
2352 EVT DestVT = TLI.getValueType(I.getType());
2353 if (DestVT.bitsLT(SrcVT))
2354 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2355 else
2356 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2357 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2358 DestVT, N));
2361 void SelectionDAGLowering::visitBitCast(User &I) {
2362 SDValue N = getValue(I.getOperand(0));
2363 EVT DestVT = TLI.getValueType(I.getType());
2365 // BitCast assures us that source and destination are the same size so this
2366 // is either a BIT_CONVERT or a no-op.
2367 if (DestVT != N.getValueType())
2368 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2369 DestVT, N)); // convert types
2370 else
2371 setValue(&I, N); // noop cast.
2374 void SelectionDAGLowering::visitInsertElement(User &I) {
2375 SDValue InVec = getValue(I.getOperand(0));
2376 SDValue InVal = getValue(I.getOperand(1));
2377 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2378 TLI.getPointerTy(),
2379 getValue(I.getOperand(2)));
2381 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2382 TLI.getValueType(I.getType()),
2383 InVec, InVal, InIdx));
2386 void SelectionDAGLowering::visitExtractElement(User &I) {
2387 SDValue InVec = getValue(I.getOperand(0));
2388 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2389 TLI.getPointerTy(),
2390 getValue(I.getOperand(1)));
2391 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2392 TLI.getValueType(I.getType()), InVec, InIdx));
2396 // Utility for visitShuffleVector - Returns true if the mask is a sequential
2397 // mask starting from SIndx and increasing to the element length (undefs are allowed).
2398 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2399 unsigned MaskNumElts = Mask.size();
2400 for (unsigned i = 0; i != MaskNumElts; ++i)
2401 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2402 return false;
2403 return true;
2406 void SelectionDAGLowering::visitShuffleVector(User &I) {
2407 SmallVector<int, 8> Mask;
2408 SDValue Src1 = getValue(I.getOperand(0));
2409 SDValue Src2 = getValue(I.getOperand(1));
2411 // Convert the ConstantVector mask operand into an array of ints, with -1
2412 // representing undef values.
2413 SmallVector<Constant*, 8> MaskElts;
2414 cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2415 MaskElts);
2416 unsigned MaskNumElts = MaskElts.size();
2417 for (unsigned i = 0; i != MaskNumElts; ++i) {
2418 if (isa<UndefValue>(MaskElts[i]))
2419 Mask.push_back(-1);
2420 else
2421 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2424 EVT VT = TLI.getValueType(I.getType());
2425 EVT SrcVT = Src1.getValueType();
2426 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2428 if (SrcNumElts == MaskNumElts) {
2429 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2430 &Mask[0]));
2431 return;
2434 // Normalize the shuffle vector since mask and vector length don't match.
2435 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2436 // The mask is longer than the source vectors, and its length is a multiple
2437 // of the source vector length. We can concatenate vectors to make the mask
2438 // and vector lengths match.
2439 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2440 // The shuffle is concatenating two vectors together.
2441 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2442 VT, Src1, Src2));
2443 return;
2446 // Pad both vectors with undefs to make them the same length as the mask.
2447 unsigned NumConcat = MaskNumElts / SrcNumElts;
2448 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2449 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2450 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2452 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2453 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2454 MOps1[0] = Src1;
2455 MOps2[0] = Src2;
2457 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2458 getCurDebugLoc(), VT,
2459 &MOps1[0], NumConcat);
2460 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2461 getCurDebugLoc(), VT,
2462 &MOps2[0], NumConcat);
2464 // Readjust mask for new input vector length.
2465 SmallVector<int, 8> MappedOps;
2466 for (unsigned i = 0; i != MaskNumElts; ++i) {
2467 int Idx = Mask[i];
2468 if (Idx < (int)SrcNumElts)
2469 MappedOps.push_back(Idx);
2470 else
2471 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2473 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2474 &MappedOps[0]));
2475 return;
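// E.g. (hypothetical): two v2f32 sources under a 4-element mask <1,2,0,3>
// are each widened to v4f32 by concatenation with undef, and mask entries
// that named Src2 (2 and 3) are remapped by +(4-2), giving <1,4,0,5>.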
2478 if (SrcNumElts > MaskNumElts) {
2479 // Analyze the access pattern of the vector to see if we can extract
2480 // two subvectors and do the shuffle. The analysis is done by calculating
2481 // the range of elements the mask accesses on both vectors.
2482 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2483 int MaxRange[2] = {-1, -1};
2485 for (unsigned i = 0; i != MaskNumElts; ++i) {
2486 int Idx = Mask[i];
2487 int Input = 0;
2488 if (Idx < 0)
2489 continue;
2491 if (Idx >= (int)SrcNumElts) {
2492 Input = 1;
2493 Idx -= SrcNumElts;
2495 if (Idx > MaxRange[Input])
2496 MaxRange[Input] = Idx;
2497 if (Idx < MinRange[Input])
2498 MinRange[Input] = Idx;
2501 // Check if the access is smaller than the vector size, and see whether we
2502 // can find a reasonable extract index.
2503 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2504 int StartIdx[2]; // StartIdx to extract from
2505 for (int Input=0; Input < 2; ++Input) {
2506 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2507 RangeUse[Input] = 0; // Unused
2508 StartIdx[Input] = 0;
2509 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2510 // Fits within range but we should see if we can find a good
2511 // start index that is a multiple of the mask length.
2512 if (MaxRange[Input] < (int)MaskNumElts) {
2513 RangeUse[Input] = 1; // Extract from beginning of the vector
2514 StartIdx[Input] = 0;
2515 } else {
2516 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2517 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2518 StartIdx[Input] + MaskNumElts < SrcNumElts)
2519 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2524 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2525 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2526 return;
2528 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2529 // Extract appropriate subvector and generate a vector shuffle
2530 for (int Input=0; Input < 2; ++Input) {
2531 SDValue& Src = Input == 0 ? Src1 : Src2;
2532 if (RangeUse[Input] == 0) {
2533 Src = DAG.getUNDEF(VT);
2534 } else {
2535 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2536 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2539 // Calculate new mask.
2540 SmallVector<int, 8> MappedOps;
2541 for (unsigned i = 0; i != MaskNumElts; ++i) {
2542 int Idx = Mask[i];
2543 if (Idx < 0)
2544 MappedOps.push_back(Idx);
2545 else if (Idx < (int)SrcNumElts)
2546 MappedOps.push_back(Idx - StartIdx[0]);
2547 else
2548 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2550 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2551 &MappedOps[0]));
2552 return;
2556 // We can't use either concat vectors or extract subvectors, so fall back
2557 // to replacing the shuffle with extract and build vector.
2559 EVT EltVT = VT.getVectorElementType();
2560 EVT PtrVT = TLI.getPointerTy();
2561 SmallVector<SDValue,8> Ops;
2562 for (unsigned i = 0; i != MaskNumElts; ++i) {
2563 if (Mask[i] < 0) {
2564 Ops.push_back(DAG.getUNDEF(EltVT));
2565 } else {
2566 int Idx = Mask[i];
2567 if (Idx < (int)SrcNumElts)
2568 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2569 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2570 else
2571 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2572 EltVT, Src2,
2573 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2576 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2577 VT, &Ops[0], Ops.size()));
2580 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2581 const Value *Op0 = I.getOperand(0);
2582 const Value *Op1 = I.getOperand(1);
2583 const Type *AggTy = I.getType();
2584 const Type *ValTy = Op1->getType();
2585 bool IntoUndef = isa<UndefValue>(Op0);
2586 bool FromUndef = isa<UndefValue>(Op1);
2588 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2589 I.idx_begin(), I.idx_end());
2591 SmallVector<EVT, 4> AggValueVTs;
2592 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2593 SmallVector<EVT, 4> ValValueVTs;
2594 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2596 unsigned NumAggValues = AggValueVTs.size();
2597 unsigned NumValValues = ValValueVTs.size();
2598 SmallVector<SDValue, 4> Values(NumAggValues);
2600 SDValue Agg = getValue(Op0);
2601 SDValue Val = getValue(Op1);
2602 unsigned i = 0;
2603 // Copy the beginning value(s) from the original aggregate.
2604 for (; i != LinearIndex; ++i)
2605 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2606 SDValue(Agg.getNode(), Agg.getResNo() + i);
2607 // Copy values from the inserted value(s).
2608 for (; i != LinearIndex + NumValValues; ++i)
2609 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2610 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2611 // Copy remaining value(s) from the original aggregate.
2612 for (; i != NumAggValues; ++i)
2613 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2614 SDValue(Agg.getNode(), Agg.getResNo() + i);
2616 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2617 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2618 &Values[0], NumAggValues));
2621 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2622 const Value *Op0 = I.getOperand(0);
2623 const Type *AggTy = Op0->getType();
2624 const Type *ValTy = I.getType();
2625 bool OutOfUndef = isa<UndefValue>(Op0);
2627 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2628 I.idx_begin(), I.idx_end());
2630 SmallVector<EVT, 4> ValValueVTs;
2631 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2633 unsigned NumValValues = ValValueVTs.size();
2634 SmallVector<SDValue, 4> Values(NumValValues);
2636 SDValue Agg = getValue(Op0);
2637 // Copy out the selected value(s).
2638 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2639 Values[i - LinearIndex] =
2640 OutOfUndef ?
2641 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2642 SDValue(Agg.getNode(), Agg.getResNo() + i);
2644 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2645 DAG.getVTList(&ValValueVTs[0], NumValValues),
2646 &Values[0], NumValValues));
2650 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2651 SDValue N = getValue(I.getOperand(0));
2652 const Type *Ty = I.getOperand(0)->getType();
2654 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2655 OI != E; ++OI) {
2656 Value *Idx = *OI;
2657 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2658 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2659 if (Field) {
2660 // N = N + Offset
2661 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2662 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2663 DAG.getIntPtrConstant(Offset));
2665 Ty = StTy->getElementType(Field);
2666 } else {
2667 Ty = cast<SequentialType>(Ty)->getElementType();
2669 // If this is a constant subscript, handle it quickly.
2670 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2671 if (CI->getZExtValue() == 0) continue;
2672 uint64_t Offs =
2673 TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2674 SDValue OffsVal;
2675 EVT PTy = TLI.getPointerTy();
2676 unsigned PtrBits = PTy.getSizeInBits();
2677 if (PtrBits < 64) {
2678 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2679 TLI.getPointerTy(),
2680 DAG.getConstant(Offs, MVT::i64));
2681 } else
2682 OffsVal = DAG.getIntPtrConstant(Offs);
2683 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2684 OffsVal);
2685 continue;
2688 // N = N + Idx * ElementSize;
2689 uint64_t ElementSize = TD->getTypeAllocSize(Ty);
2690 SDValue IdxN = getValue(Idx);
2692 // If the index is smaller or larger than intptr_t, truncate or extend
2693 // it.
2694 if (IdxN.getValueType().bitsLT(N.getValueType()))
2695 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2696 N.getValueType(), IdxN);
2697 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2698 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2699 N.getValueType(), IdxN);
2701 // If this is a multiply by a power of two, turn it into a shl
2702 // immediately. This is a very common case.
2703 if (ElementSize != 1) {
2704 if (isPowerOf2_64(ElementSize)) {
2705 unsigned Amt = Log2_64(ElementSize);
2706 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2707 N.getValueType(), IdxN,
2708 DAG.getConstant(Amt, TLI.getPointerTy()));
2709 } else {
2710 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2711 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2712 N.getValueType(), IdxN, Scale);
2716 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2717 N.getValueType(), N, IdxN);
2720 setValue(&I, N);
2723 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2724 // If this is a fixed-sized alloca in the entry block of the function,
2725 // allocate it statically on the stack.
2726 if (FuncInfo.StaticAllocaMap.count(&I))
2727 return; // getValue will auto-populate this.
2729 const Type *Ty = I.getAllocatedType();
2730 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2731 unsigned Align =
2732 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2733 I.getAlignment());
2735 SDValue AllocSize = getValue(I.getArraySize());
2737 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2738 AllocSize,
2739 DAG.getConstant(TySize, AllocSize.getValueType()));
2743 EVT IntPtr = TLI.getPointerTy();
2744 if (IntPtr.bitsLT(AllocSize.getValueType()))
2745 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2746 IntPtr, AllocSize);
2747 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2748 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2749 IntPtr, AllocSize);
2751 // Handle alignment. If the requested alignment is less than or equal to
2752 // the stack alignment, ignore it. If the size is greater than or equal to
2753 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2754 unsigned StackAlign =
2755 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2756 if (Align <= StackAlign)
2757 Align = 0;
2759 // Round the size of the allocation up to the stack alignment size
2760 // by adding StackAlign-1 to the size.
2761 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2762 AllocSize.getValueType(), AllocSize,
2763 DAG.getIntPtrConstant(StackAlign-1));
2764 // Mask out the low bits for alignment purposes.
2765 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2766 AllocSize.getValueType(), AllocSize,
2767 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
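// E.g. with a (target-dependent) StackAlign of 16, a 40-byte request
// becomes (40 + 15) & ~15 == 48, the next multiple of the stack alignment.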
2769 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2770 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2771 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2772 VTs, Ops, 3);
2773 setValue(&I, DSA);
2774 DAG.setRoot(DSA.getValue(1));
2776 // Inform the Frame Information that we have just allocated a variable-sized
2777 // object.
2778 FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2781 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2782 const Value *SV = I.getOperand(0);
2783 SDValue Ptr = getValue(SV);
2785 const Type *Ty = I.getType();
2786 bool isVolatile = I.isVolatile();
2787 unsigned Alignment = I.getAlignment();
2789 SmallVector<EVT, 4> ValueVTs;
2790 SmallVector<uint64_t, 4> Offsets;
2791 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2792 unsigned NumValues = ValueVTs.size();
2793 if (NumValues == 0)
2794 return;
2796 SDValue Root;
2797 bool ConstantMemory = false;
2798 if (I.isVolatile())
2799 // Serialize volatile loads with other side effects.
2800 Root = getRoot();
2801 else if (AA->pointsToConstantMemory(SV)) {
2802 // Do not serialize (non-volatile) loads of constant memory with anything.
2803 Root = DAG.getEntryNode();
2804 ConstantMemory = true;
2805 } else {
2806 // Do not serialize non-volatile loads against each other.
2807 Root = DAG.getRoot();
2810 SmallVector<SDValue, 4> Values(NumValues);
2811 SmallVector<SDValue, 4> Chains(NumValues);
2812 EVT PtrVT = Ptr.getValueType();
2813 for (unsigned i = 0; i != NumValues; ++i) {
2814 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2815 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2816 PtrVT, Ptr,
2817 DAG.getConstant(Offsets[i], PtrVT)),
2818 SV, Offsets[i], isVolatile, Alignment);
2819 Values[i] = L;
2820 Chains[i] = L.getValue(1);
2823 if (!ConstantMemory) {
2824 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2825 MVT::Other,
2826 &Chains[0], NumValues);
2827 if (isVolatile)
2828 DAG.setRoot(Chain);
2829 else
2830 PendingLoads.push_back(Chain);
2833 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2834 DAG.getVTList(&ValueVTs[0], NumValues),
2835 &Values[0], NumValues));
2839 void SelectionDAGLowering::visitStore(StoreInst &I) {
2840 Value *SrcV = I.getOperand(0);
2841 Value *PtrV = I.getOperand(1);
2843 SmallVector<EVT, 4> ValueVTs;
2844 SmallVector<uint64_t, 4> Offsets;
2845 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2846 unsigned NumValues = ValueVTs.size();
2847 if (NumValues == 0)
2848 return;
2850 // Get the lowered operands. Note that we do this after
2851 // checking if NumValues is zero, because with zero values
2852 // the operands won't have entries in the map.
2853 SDValue Src = getValue(SrcV);
2854 SDValue Ptr = getValue(PtrV);
2856 SDValue Root = getRoot();
2857 SmallVector<SDValue, 4> Chains(NumValues);
2858 EVT PtrVT = Ptr.getValueType();
2859 bool isVolatile = I.isVolatile();
2860 unsigned Alignment = I.getAlignment();
2861 for (unsigned i = 0; i != NumValues; ++i)
2862 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2863 SDValue(Src.getNode(), Src.getResNo() + i),
2864 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2865 PtrVT, Ptr,
2866 DAG.getConstant(Offsets[i], PtrVT)),
2867 PtrV, Offsets[i],
2868 isVolatile, Alignment);
2870 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2871 MVT::Other, &Chains[0], NumValues));
2874 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2875 /// node.
2876 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2877 unsigned Intrinsic) {
2878 bool HasChain = !I.doesNotAccessMemory();
2879 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2881 // Build the operand list.
2882 SmallVector<SDValue, 8> Ops;
2883 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2884 if (OnlyLoad) {
2885 // We don't need to serialize loads against other loads.
2886 Ops.push_back(DAG.getRoot());
2887 } else {
2888 Ops.push_back(getRoot());
2892 // Info is set by getTgtMemIntrinsic.
2893 TargetLowering::IntrinsicInfo Info;
2894 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2896 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2897 if (!IsTgtIntrinsic)
2898 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2900 // Add all operands of the call to the operand list.
2901 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2902 SDValue Op = getValue(I.getOperand(i));
2903 assert(TLI.isTypeLegal(Op.getValueType()) &&
2904 "Intrinsic uses a non-legal type?");
2905 Ops.push_back(Op);
2908 SmallVector<EVT, 4> ValueVTs;
2909 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2910 #ifndef NDEBUG
2911 for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2912 assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2913 "Intrinsic uses a non-legal type?");
2915 #endif // NDEBUG
2916 if (HasChain)
2917 ValueVTs.push_back(MVT::Other);
2919 SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2921 // Create the node.
2922 SDValue Result;
2923 if (IsTgtIntrinsic) {
2924 // This is a target intrinsic that touches memory.
2925 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2926 VTs, &Ops[0], Ops.size(),
2927 Info.memVT, Info.ptrVal, Info.offset,
2928 Info.align, Info.vol,
2929 Info.readMem, Info.writeMem);
2931 else if (!HasChain)
2932 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2933 VTs, &Ops[0], Ops.size());
2934 else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
2935 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2936 VTs, &Ops[0], Ops.size());
2937 else
2938 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2939 VTs, &Ops[0], Ops.size());
2941 if (HasChain) {
2942 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2943 if (OnlyLoad)
2944 PendingLoads.push_back(Chain);
2945 else
2946 DAG.setRoot(Chain);
2948 if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
2949 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2950 EVT VT = TLI.getValueType(PTy);
2951 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2953 setValue(&I, Result);
2957 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2958 static GlobalVariable *ExtractTypeInfo(Value *V) {
2959 V = V->stripPointerCasts();
2960 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2961 assert ((GV || isa<ConstantPointerNull>(V)) &&
2962 "TypeInfo must be a global variable or NULL");
2963 return GV;
2966 namespace llvm {
2968 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2969 /// call, and add them to the specified machine basic block.
2970 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2971 MachineBasicBlock *MBB) {
2972 // Inform the MachineModuleInfo of the personality for this landing pad.
2973 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2974 assert(CE->getOpcode() == Instruction::BitCast &&
2975 isa<Function>(CE->getOperand(0)) &&
2976 "Personality should be a function");
2977 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2979 // Gather all the type infos for this landing pad and pass them along to
2980 // MachineModuleInfo.
2981 std::vector<GlobalVariable *> TyInfo;
2982 unsigned N = I.getNumOperands();
2984 for (unsigned i = N - 1; i > 2; --i) {
2985 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2986 unsigned FilterLength = CI->getZExtValue();
2987 unsigned FirstCatch = i + FilterLength + !FilterLength;
2988 assert (FirstCatch <= N && "Invalid filter length");
2990 if (FirstCatch < N) {
2991 TyInfo.reserve(N - FirstCatch);
2992 for (unsigned j = FirstCatch; j < N; ++j)
2993 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2994 MMI->addCatchTypeInfo(MBB, TyInfo);
2995 TyInfo.clear();
2998 if (!FilterLength) {
2999 // Cleanup.
3000 MMI->addCleanup(MBB);
3001 } else {
3002 // Filter.
3003 TyInfo.reserve(FilterLength - 1);
3004 for (unsigned j = i + 1; j < FirstCatch; ++j)
3005 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3006 MMI->addFilterTypeInfo(MBB, TyInfo);
3007 TyInfo.clear();
3010 N = i;
3014 if (N > 3) {
3015 TyInfo.reserve(N - 3);
3016 for (unsigned j = 3; j < N; ++j)
3017 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3018 MMI->addCatchTypeInfo(MBB, TyInfo);
3024 /// GetSignificand - Get the significand and build it into a floating-point
3025 /// number with exponent of 1:
3027 /// Op = (Op & 0x007fffff) | 0x3f800000;
3029 /// where Op is the hexadecimal representation of the floating-point value.
3030 static SDValue
3031 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3032 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3033 DAG.getConstant(0x007fffff, MVT::i32));
3034 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3035 DAG.getConstant(0x3f800000, MVT::i32));
3036 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3039 /// GetExponent - Get the exponent:
3041 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3043 /// where Op is the hexadecimal representation of the floating-point value.
3044 static SDValue
3045 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3046 DebugLoc dl) {
3047 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3048 DAG.getConstant(0x7f800000, MVT::i32));
3049 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3050 DAG.getConstant(23, TLI.getPointerTy()));
3051 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3052 DAG.getConstant(127, MVT::i32));
3053 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
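// Worked example (single-precision assumed): for Op == 0x40490fdb
// (~3.14159f), GetSignificand returns 0x3fc90fdb (~1.5707964f) and
// GetExponent returns (128 - 127) == 1.0f, recovering pi as
// 1.5707964 * 2^1.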
3056 /// getF32Constant - Get a 32-bit floating-point constant.
3057 static SDValue
3058 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3059 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3062 /// Inlined utility function to implement binary input atomic intrinsics for
3063 /// visitIntrinsicCall: I is a call instruction,
3064 /// Op is the associated NodeType for I.
3065 const char *
3066 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3067 SDValue Root = getRoot();
3068 SDValue L =
3069 DAG.getAtomic(Op, getCurDebugLoc(),
3070 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3071 Root,
3072 getValue(I.getOperand(1)),
3073 getValue(I.getOperand(2)),
3074 I.getOperand(1));
3075 setValue(&I, L);
3076 DAG.setRoot(L.getValue(1));
3077 return 0;
3080 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3081 const char *
3082 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3083 SDValue Op1 = getValue(I.getOperand(1));
3084 SDValue Op2 = getValue(I.getOperand(2));
3086 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3087 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3089 setValue(&I, Result);
3090 return 0;
3091 }
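// Illustrative note: the VT list (Op1.getValueType(), MVT::i1) above
// mirrors the {iN, i1} struct these intrinsics return: result 0 is the
// arithmetic value and result 1 is the overflow bit.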
3093 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3094 /// limited-precision mode.
3095 void
3096 SelectionDAGLowering::visitExp(CallInst &I) {
3097 SDValue result;
3098 DebugLoc dl = getCurDebugLoc();
3100 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3101 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3102 SDValue Op = getValue(I.getOperand(1));
3104 // Put the exponent in the right bit position for later addition to the
3105 // final result:
3107 // #define LOG2OFe 1.4426950f
3108 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3109 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3110 getF32Constant(DAG, 0x3fb8aa3b));
3111 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3113 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3114 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3115 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3117 // IntegerPartOfX <<= 23;
3118 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3119 DAG.getConstant(23, TLI.getPointerTy()));
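// Illustrative note: the expansion computes
//   exp(x) = 2^(x*log2(e)) = 2^IntegerPartOfX * 2^FractionalPartOfX,
// approximating 2^FractionalPartOfX with a minimax polynomial below and
// then folding in 2^IntegerPartOfX by adding (IntegerPartOfX << 23)
// directly into the exponent field of the f32 bit pattern.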
3121 if (LimitFloatPrecision <= 6) {
3122 // For floating-point precision of 6:
3124 // TwoToFractionalPartOfX =
3125 // 0.997535578f +
3126 // (0.735607626f + 0.252464424f * x) * x;
3128 // error 0.0144103317, which is 6 bits
3129 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3130 getF32Constant(DAG, 0x3e814304));
3131 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3132 getF32Constant(DAG, 0x3f3c50c8));
3133 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3134 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3135 getF32Constant(DAG, 0x3f7f5e7e));
3136 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3138 // Add the exponent into the result in integer domain.
3139 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3140 TwoToFracPartOfX, IntegerPartOfX);
3142 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3143 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3144 // For floating-point precision of 12:
3146 // TwoToFractionalPartOfX =
3147 // 0.999892986f +
3148 // (0.696457318f +
3149 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3151 // 0.000107046256 error, which is 13 to 14 bits
3152 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3153 getF32Constant(DAG, 0x3da235e3));
3154 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3155 getF32Constant(DAG, 0x3e65b8f3));
3156 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3157 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3158 getF32Constant(DAG, 0x3f324b07));
3159 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3160 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3161 getF32Constant(DAG, 0x3f7ff8fd));
3162 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3164 // Add the exponent into the result in integer domain.
3165 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3166 TwoToFracPartOfX, IntegerPartOfX);
3168 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3169 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3170 // For floating-point precision of 18:
3172 // TwoToFractionalPartOfX =
3173 // 0.999999982f +
3174 // (0.693148872f +
3175 // (0.240227044f +
3176 // (0.554906021e-1f +
3177 // (0.961591928e-2f +
3178 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3180 // error 2.47208000*10^(-7), which is better than 18 bits
3181 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3182 getF32Constant(DAG, 0x3924b03e));
3183 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3184 getF32Constant(DAG, 0x3ab24b87));
3185 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3186 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3187 getF32Constant(DAG, 0x3c1d8c17));
3188 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3189 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3190 getF32Constant(DAG, 0x3d634a1d));
3191 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3192 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3193 getF32Constant(DAG, 0x3e75fe14));
3194 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3195 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3196 getF32Constant(DAG, 0x3f317234));
3197 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3198 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3199 getF32Constant(DAG, 0x3f800000));
3200 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3201 MVT::i32, t13);
3203 // Add the exponent into the result in integer domain.
3204 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3205 TwoToFracPartOfX, IntegerPartOfX);
3207 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3208 }
3209 } else {
3210 // No special expansion.
3211 result = DAG.getNode(ISD::FEXP, dl,
3212 getValue(I.getOperand(1)).getValueType(),
3213 getValue(I.getOperand(1)));
3214 }
3216 setValue(&I, result);
3217 }
3219 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3220 /// limited-precision mode.
3221 void
3222 SelectionDAGLowering::visitLog(CallInst &I) {
3223 SDValue result;
3224 DebugLoc dl = getCurDebugLoc();
3226 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3227 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3228 SDValue Op = getValue(I.getOperand(1));
3229 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3231 // Scale the exponent by log(2) [0.69314718f].
3232 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3233 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3234 getF32Constant(DAG, 0x3f317218));
3236 // Get the significand and build it into a floating-point number with
3237 // exponent of 1.
3238 SDValue X = GetSignificand(DAG, Op1, dl);
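// Illustrative note: writing Op = 2^e * m with m in [1,2),
//   log(Op) = e*log(2) + log(m);
// LogOfExponent above is the first term, and log(m) is approximated by
// the polynomials in X below.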
3240 if (LimitFloatPrecision <= 6) {
3241 // For floating-point precision of 6:
3243 // LogofMantissa =
3244 // -1.1609546f +
3245 // (1.4034025f - 0.23903021f * x) * x;
3247 // error 0.0034276066, which is better than 8 bits
3248 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3249 getF32Constant(DAG, 0xbe74c456));
3250 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3251 getF32Constant(DAG, 0x3fb3a2b1));
3252 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3253 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3254 getF32Constant(DAG, 0x3f949a29));
3256 result = DAG.getNode(ISD::FADD, dl,
3257 MVT::f32, LogOfExponent, LogOfMantissa);
3258 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3259 // For floating-point precision of 12:
3261 // LogOfMantissa =
3262 // -1.7417939f +
3263 // (2.8212026f +
3264 // (-1.4699568f +
3265 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3267 // error 0.000061011436, which is 14 bits
3268 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3269 getF32Constant(DAG, 0xbd67b6d6));
3270 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3271 getF32Constant(DAG, 0x3ee4f4b8));
3272 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3273 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3274 getF32Constant(DAG, 0x3fbc278b));
3275 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3276 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3277 getF32Constant(DAG, 0x40348e95));
3278 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3279 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3280 getF32Constant(DAG, 0x3fdef31a));
3282 result = DAG.getNode(ISD::FADD, dl,
3283 MVT::f32, LogOfExponent, LogOfMantissa);
3284 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3285 // For floating-point precision of 18:
3287 // LogOfMantissa =
3288 // -2.1072184f +
3289 // (4.2372794f +
3290 // (-3.7029485f +
3291 // (2.2781945f +
3292 // (-0.87823314f +
3293 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3295 // error 0.0000023660568, which is better than 18 bits
3296 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3297 getF32Constant(DAG, 0xbc91e5ac));
3298 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3299 getF32Constant(DAG, 0x3e4350aa));
3300 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3301 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3302 getF32Constant(DAG, 0x3f60d3e3));
3303 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3304 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3305 getF32Constant(DAG, 0x4011cdf0));
3306 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3307 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3308 getF32Constant(DAG, 0x406cfd1c));
3309 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3310 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3311 getF32Constant(DAG, 0x408797cb));
3312 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3313 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3314 getF32Constant(DAG, 0x4006dcab));
3316 result = DAG.getNode(ISD::FADD, dl,
3317 MVT::f32, LogOfExponent, LogOfMantissa);
3318 }
3319 } else {
3320 // No special expansion.
3321 result = DAG.getNode(ISD::FLOG, dl,
3322 getValue(I.getOperand(1)).getValueType(),
3323 getValue(I.getOperand(1)));
3324 }
3326 setValue(&I, result);
3327 }
3329 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3330 /// limited-precision mode.
3331 void
3332 SelectionDAGLowering::visitLog2(CallInst &I) {
3333 SDValue result;
3334 DebugLoc dl = getCurDebugLoc();
3336 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3337 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3338 SDValue Op = getValue(I.getOperand(1));
3339 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3341 // Get the exponent.
3342 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3344 // Get the significand and build it into a floating-point number with
3345 // exponent of 1.
3346 SDValue X = GetSignificand(DAG, Op1, dl);
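// Illustrative note: here log2(Op) = e + log2(m), so the raw exponent is
// used unscaled and only log2 of the significand needs a polynomial
// approximation.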
3348 // Different possible minimax approximations of significand in
3349 // floating-point for various degrees of accuracy over [1,2].
3350 if (LimitFloatPrecision <= 6) {
3351 // For floating-point precision of 6:
3353 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3355 // error 0.0049451742, which is more than 7 bits
3356 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3357 getF32Constant(DAG, 0xbeb08fe0));
3358 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3359 getF32Constant(DAG, 0x40019463));
3360 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3361 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3362 getF32Constant(DAG, 0x3fd6633d));
3364 result = DAG.getNode(ISD::FADD, dl,
3365 MVT::f32, LogOfExponent, Log2ofMantissa);
3366 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3367 // For floating-point precision of 12:
3369 // Log2ofMantissa =
3370 // -2.51285454f +
3371 // (4.07009056f +
3372 // (-2.12067489f +
3373 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3375 // error 0.0000876136000, which is better than 13 bits
3376 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3377 getF32Constant(DAG, 0xbda7262e));
3378 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3379 getF32Constant(DAG, 0x3f25280b));
3380 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3381 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3382 getF32Constant(DAG, 0x4007b923));
3383 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3384 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3385 getF32Constant(DAG, 0x40823e2f));
3386 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3387 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3388 getF32Constant(DAG, 0x4020d29c));
3390 result = DAG.getNode(ISD::FADD, dl,
3391 MVT::f32, LogOfExponent, Log2ofMantissa);
3392 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3393 // For floating-point precision of 18:
3395 // Log2ofMantissa =
3396 // -3.0400495f +
3397 // (6.1129976f +
3398 // (-5.3420409f +
3399 // (3.2865683f +
3400 // (-1.2669343f +
3401 // (0.27515199f -
3402 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3404 // error 0.0000018516, which is better than 18 bits
3405 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3406 getF32Constant(DAG, 0xbcd2769e));
3407 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3408 getF32Constant(DAG, 0x3e8ce0b9));
3409 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3410 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3411 getF32Constant(DAG, 0x3fa22ae7));
3412 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3413 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3414 getF32Constant(DAG, 0x40525723));
3415 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3416 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3417 getF32Constant(DAG, 0x40aaf200));
3418 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3419 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3420 getF32Constant(DAG, 0x40c39dad));
3421 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3422 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3423 getF32Constant(DAG, 0x4042902c));
3425 result = DAG.getNode(ISD::FADD, dl,
3426 MVT::f32, LogOfExponent, Log2ofMantissa);
3427 }
3428 } else {
3429 // No special expansion.
3430 result = DAG.getNode(ISD::FLOG2, dl,
3431 getValue(I.getOperand(1)).getValueType(),
3432 getValue(I.getOperand(1)));
3433 }
3435 setValue(&I, result);
3436 }
3438 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3439 /// limited-precision mode.
3440 void
3441 SelectionDAGLowering::visitLog10(CallInst &I) {
3442 SDValue result;
3443 DebugLoc dl = getCurDebugLoc();
3445 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3446 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3447 SDValue Op = getValue(I.getOperand(1));
3448 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3450 // Scale the exponent by log10(2) [0.30102999f].
3451 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3452 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3453 getF32Constant(DAG, 0x3e9a209a));
3455 // Get the significand and build it into a floating-point number with
3456 // exponent of 1.
3457 SDValue X = GetSignificand(DAG, Op1, dl);
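// Illustrative note: log10(Op) = e*log10(2) + log10(m); the constant
// 0x3e9a209a above is the bit pattern of log10(2) ~ 0.30102999f.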
3459 if (LimitFloatPrecision <= 6) {
3460 // For floating-point precision of 6:
3462 // Log10ofMantissa =
3463 // -0.50419619f +
3464 // (0.60948995f - 0.10380950f * x) * x;
3466 // error 0.0014886165, which is 6 bits
3467 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3468 getF32Constant(DAG, 0xbdd49a13));
3469 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3470 getF32Constant(DAG, 0x3f1c0789));
3471 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3472 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3473 getF32Constant(DAG, 0x3f011300));
3475 result = DAG.getNode(ISD::FADD, dl,
3476 MVT::f32, LogOfExponent, Log10ofMantissa);
3477 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3478 // For floating-point precision of 12:
3480 // Log10ofMantissa =
3481 // -0.64831180f +
3482 // (0.91751397f +
3483 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3485 // error 0.00019228036, which is better than 12 bits
3486 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3487 getF32Constant(DAG, 0x3d431f31));
3488 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3489 getF32Constant(DAG, 0x3ea21fb2));
3490 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3491 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3492 getF32Constant(DAG, 0x3f6ae232));
3493 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3494 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3495 getF32Constant(DAG, 0x3f25f7c3));
3497 result = DAG.getNode(ISD::FADD, dl,
3498 MVT::f32, LogOfExponent, Log10ofMantissa);
3499 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3500 // For floating-point precision of 18:
3502 // Log10ofMantissa =
3503 // -0.84299375f +
3504 // (1.5327582f +
3505 // (-1.0688956f +
3506 // (0.49102474f +
3507 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3509 // error 0.0000037995730, which is better than 18 bits
3510 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3511 getF32Constant(DAG, 0x3c5d51ce));
3512 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3513 getF32Constant(DAG, 0x3e00685a));
3514 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3515 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3516 getF32Constant(DAG, 0x3efb6798));
3517 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3518 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3519 getF32Constant(DAG, 0x3f88d192));
3520 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3521 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3522 getF32Constant(DAG, 0x3fc4316c));
3523 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3524 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3525 getF32Constant(DAG, 0x3f57ce70));
3527 result = DAG.getNode(ISD::FADD, dl,
3528 MVT::f32, LogOfExponent, Log10ofMantissa);
3529 }
3530 } else {
3531 // No special expansion.
3532 result = DAG.getNode(ISD::FLOG10, dl,
3533 getValue(I.getOperand(1)).getValueType(),
3534 getValue(I.getOperand(1)));
3535 }
3537 setValue(&I, result);
3538 }
3540 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3541 /// limited-precision mode.
3542 void
3543 SelectionDAGLowering::visitExp2(CallInst &I) {
3544 SDValue result;
3545 DebugLoc dl = getCurDebugLoc();
3547 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3548 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3549 SDValue Op = getValue(I.getOperand(1));
3551 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3553 // FractionalPartOfX = x - (float)IntegerPartOfX;
3554 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3555 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3557 // IntegerPartOfX <<= 23;
3558 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3559 DAG.getConstant(23, TLI.getPointerTy()));
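// Illustrative note: exp2 skips the initial scaling done in visitExp --
// Op is already the base-2 exponent -- and otherwise reuses the same
// split into integer and fractional parts, reassembled via the
// (IntegerPartOfX << 23) exponent-field add.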
3561 if (LimitFloatPrecision <= 6) {
3562 // For floating-point precision of 6:
3564 // TwoToFractionalPartOfX =
3565 // 0.997535578f +
3566 // (0.735607626f + 0.252464424f * x) * x;
3568 // error 0.0144103317, which is 6 bits
3569 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3570 getF32Constant(DAG, 0x3e814304));
3571 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3572 getF32Constant(DAG, 0x3f3c50c8));
3573 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3574 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3575 getF32Constant(DAG, 0x3f7f5e7e));
3576 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3577 SDValue TwoToFractionalPartOfX =
3578 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3580 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3581 MVT::f32, TwoToFractionalPartOfX);
3582 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3583 // For floating-point precision of 12:
3585 // TwoToFractionalPartOfX =
3586 // 0.999892986f +
3587 // (0.696457318f +
3588 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3590 // error 0.000107046256, which is 13 to 14 bits
3591 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3592 getF32Constant(DAG, 0x3da235e3));
3593 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3594 getF32Constant(DAG, 0x3e65b8f3));
3595 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3596 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3597 getF32Constant(DAG, 0x3f324b07));
3598 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3599 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3600 getF32Constant(DAG, 0x3f7ff8fd));
3601 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3602 SDValue TwoToFractionalPartOfX =
3603 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3605 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3606 MVT::f32, TwoToFractionalPartOfX);
3607 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3608 // For floating-point precision of 18:
3610 // TwoToFractionalPartOfX =
3611 // 0.999999982f +
3612 // (0.693148872f +
3613 // (0.240227044f +
3614 // (0.554906021e-1f +
3615 // (0.961591928e-2f +
3616 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3617 // error 2.47208000*10^(-7), which is better than 18 bits
3618 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3619 getF32Constant(DAG, 0x3924b03e));
3620 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3621 getF32Constant(DAG, 0x3ab24b87));
3622 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3623 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3624 getF32Constant(DAG, 0x3c1d8c17));
3625 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3626 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3627 getF32Constant(DAG, 0x3d634a1d));
3628 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3629 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3630 getF32Constant(DAG, 0x3e75fe14));
3631 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3632 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3633 getF32Constant(DAG, 0x3f317234));
3634 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3635 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3636 getF32Constant(DAG, 0x3f800000));
3637 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3638 SDValue TwoToFractionalPartOfX =
3639 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3641 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3642 MVT::f32, TwoToFractionalPartOfX);
3643 }
3644 } else {
3645 // No special expansion.
3646 result = DAG.getNode(ISD::FEXP2, dl,
3647 getValue(I.getOperand(1)).getValueType(),
3648 getValue(I.getOperand(1)));
3649 }
3651 setValue(&I, result);
3652 }
3654 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3655 /// limited-precision mode when the base is exactly 10.0f.
3656 void
3657 SelectionDAGLowering::visitPow(CallInst &I) {
3658 SDValue result;
3659 Value *Val = I.getOperand(1);
3660 DebugLoc dl = getCurDebugLoc();
3661 bool IsExp10 = false;
3663 if (getValue(Val).getValueType() == MVT::f32 &&
3664 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3665 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3666 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3667 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3668 APFloat Ten(10.0f);
3669 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3670 }
3671 }
3672 }
3674 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3675 SDValue Op = getValue(I.getOperand(2));
3677 // Put the exponent in the right bit position for later addition to the
3678 // final result:
3680 // #define LOG2OF10 3.3219281f
3681 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3682 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3683 getF32Constant(DAG, 0x40549a78));
3684 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3686 // FractionalPartOfX = x - (float)IntegerPartOfX;
3687 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3688 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3690 // IntegerPartOfX <<= 23;
3691 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3692 DAG.getConstant(23, TLI.getPointerTy()));
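// Illustrative note: since the base was verified to be exactly 10.0f,
//   pow(10, x) = 2^(x * log2(10)),
// so from here on this is the exp2 expansion applied to x * LOG2OF10.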
3694 if (LimitFloatPrecision <= 6) {
3695 // For floating-point precision of 6:
3697 // twoToFractionalPartOfX =
3698 // 0.997535578f +
3699 // (0.735607626f + 0.252464424f * x) * x;
3701 // error 0.0144103317, which is 6 bits
3702 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3703 getF32Constant(DAG, 0x3e814304));
3704 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3705 getF32Constant(DAG, 0x3f3c50c8));
3706 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3707 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3708 getF32Constant(DAG, 0x3f7f5e7e));
3709 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3710 SDValue TwoToFractionalPartOfX =
3711 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3713 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3714 MVT::f32, TwoToFractionalPartOfX);
3715 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3716 // For floating-point precision of 12:
3718 // TwoToFractionalPartOfX =
3719 // 0.999892986f +
3720 // (0.696457318f +
3721 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3723 // error 0.000107046256, which is 13 to 14 bits
3724 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3725 getF32Constant(DAG, 0x3da235e3));
3726 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3727 getF32Constant(DAG, 0x3e65b8f3));
3728 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3729 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3730 getF32Constant(DAG, 0x3f324b07));
3731 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3732 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3733 getF32Constant(DAG, 0x3f7ff8fd));
3734 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3735 SDValue TwoToFractionalPartOfX =
3736 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3738 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3739 MVT::f32, TwoToFractionalPartOfX);
3740 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3741 // For floating-point precision of 18:
3743 // TwoToFractionalPartOfX =
3744 // 0.999999982f +
3745 // (0.693148872f +
3746 // (0.240227044f +
3747 // (0.554906021e-1f +
3748 // (0.961591928e-2f +
3749 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3750 // error 2.47208000*10^(-7), which is better than 18 bits
3751 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3752 getF32Constant(DAG, 0x3924b03e));
3753 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3754 getF32Constant(DAG, 0x3ab24b87));
3755 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3756 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3757 getF32Constant(DAG, 0x3c1d8c17));
3758 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3759 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3760 getF32Constant(DAG, 0x3d634a1d));
3761 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3762 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3763 getF32Constant(DAG, 0x3e75fe14));
3764 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3765 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3766 getF32Constant(DAG, 0x3f317234));
3767 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3768 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3769 getF32Constant(DAG, 0x3f800000));
3770 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3771 SDValue TwoToFractionalPartOfX =
3772 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3774 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3775 MVT::f32, TwoToFractionalPartOfX);
3776 }
3777 } else {
3778 // No special expansion.
3779 result = DAG.getNode(ISD::FPOW, dl,
3780 getValue(I.getOperand(1)).getValueType(),
3781 getValue(I.getOperand(1)),
3782 getValue(I.getOperand(2)));
3783 }
3785 setValue(&I, result);
3786 }
3788 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3789 /// we want to emit this as a call to a named external function, return the
3790 /// name; otherwise, lower it and return null.
3791 const char *
3792 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3793 DebugLoc dl = getCurDebugLoc();
3794 switch (Intrinsic) {
3795 default:
3796 // By default, turn this into a target intrinsic node.
3797 visitTargetIntrinsic(I, Intrinsic);
3798 return 0;
3799 case Intrinsic::vastart: visitVAStart(I); return 0;
3800 case Intrinsic::vaend: visitVAEnd(I); return 0;
3801 case Intrinsic::vacopy: visitVACopy(I); return 0;
3802 case Intrinsic::returnaddress:
3803 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3804 getValue(I.getOperand(1))));
3805 return 0;
3806 case Intrinsic::frameaddress:
3807 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3808 getValue(I.getOperand(1))));
3809 return 0;
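// Illustrative note: the pointer arithmetic in the two cases below
// ("_setjmp" + 1 == "setjmp") drops the leading underscore when the
// target does not use the underscore-prefixed variants.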
3810 case Intrinsic::setjmp:
3811 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3812 break;
3813 case Intrinsic::longjmp:
3814 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3815 break;
3816 case Intrinsic::memcpy: {
3817 SDValue Op1 = getValue(I.getOperand(1));
3818 SDValue Op2 = getValue(I.getOperand(2));
3819 SDValue Op3 = getValue(I.getOperand(3));
3820 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3821 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3822 I.getOperand(1), 0, I.getOperand(2), 0));
3823 return 0;
3825 case Intrinsic::memset: {
3826 SDValue Op1 = getValue(I.getOperand(1));
3827 SDValue Op2 = getValue(I.getOperand(2));
3828 SDValue Op3 = getValue(I.getOperand(3));
3829 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3830 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3831 I.getOperand(1), 0));
3832 return 0;
3834 case Intrinsic::memmove: {
3835 SDValue Op1 = getValue(I.getOperand(1));
3836 SDValue Op2 = getValue(I.getOperand(2));
3837 SDValue Op3 = getValue(I.getOperand(3));
3838 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3840 // If the source and destination are known not to alias, we can
3841 // lower memmove as memcpy.
3842 uint64_t Size = -1ULL;
3843 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3844 Size = C->getZExtValue();
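// Illustrative note: when the length operand is not a ConstantSDNode,
// Size stays -1ULL ("unknown"), so the alias query below is conservative
// and the memcpy fast path is taken only for provably disjoint operands.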
3845 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3846 AliasAnalysis::NoAlias) {
3847 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3848 I.getOperand(1), 0, I.getOperand(2), 0));
3849 return 0;
3852 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3853 I.getOperand(1), 0, I.getOperand(2), 0));
3854 return 0;
3856 case Intrinsic::dbg_stoppoint: {
3857 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3858 if (isValidDebugInfoIntrinsic(SPI, CodeGenOpt::Default)) {
3859 MachineFunction &MF = DAG.getMachineFunction();
3860 DebugLoc Loc = ExtractDebugLocation(SPI, MF.getDebugLocInfo());
3861 setCurDebugLoc(Loc);
3863 if (OptLevel == CodeGenOpt::None)
3864 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3865 SPI.getLine(),
3866 SPI.getColumn(),
3867 SPI.getContext()));
3869 return 0;
3871 case Intrinsic::dbg_region_start: {
3872 DwarfWriter *DW = DAG.getDwarfWriter();
3873 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3874 if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
3875 && DW->ShouldEmitDwarfDebug()) {
3876 unsigned LabelID =
3877 DW->RecordRegionStart(RSI.getContext());
3878 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3879 getRoot(), LabelID));
3881 return 0;
3883 case Intrinsic::dbg_region_end: {
3884 DwarfWriter *DW = DAG.getDwarfWriter();
3885 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3887 if (!isValidDebugInfoIntrinsic(REI, OptLevel) || !DW
3888 || !DW->ShouldEmitDwarfDebug())
3889 return 0;
3891 MachineFunction &MF = DAG.getMachineFunction();
3892 DISubprogram Subprogram(REI.getContext());
3894 if (isInlinedFnEnd(REI, MF.getFunction())) {
3895 // This is the end of an inlined function. Debugging information for
3896 // inlined functions is not handled yet (it is only supported by FastISel).
3897 if (OptLevel == CodeGenOpt::None) {
3898 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3899 if (ID != 0)
3900 // The returned ID is 0 if this is an unbalanced "end of inlined
3901 // scope". This could happen if the optimizer eats dbg intrinsics or
3902 // "beginning of inlined scope" is not recognized due to missing
3903 // location info. In such cases, ignore this region.end.
3904 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3905 getRoot(), ID));
3907 return 0;
3910 unsigned LabelID =
3911 DW->RecordRegionEnd(REI.getContext());
3912 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3913 getRoot(), LabelID));
3914 return 0;
3916 case Intrinsic::dbg_func_start: {
3917 DwarfWriter *DW = DAG.getDwarfWriter();
3918 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3919 if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None))
3920 return 0;
3922 MachineFunction &MF = DAG.getMachineFunction();
3923 // This is the beginning of an inlined function.
3924 if (isInlinedFnStart(FSI, MF.getFunction())) {
3925 if (OptLevel != CodeGenOpt::None)
3926 // FIXME: Debugging information for inlined functions is only
3927 // supported at CodeGenOpt::None.
3928 return 0;
3930 DebugLoc PrevLoc = CurDebugLoc;
3931 // If llvm.dbg.func.start is seen in a new block before any
3932 // llvm.dbg.stoppoint intrinsic then the location info is unknown.
3933 // FIXME: Why is DebugLoc reset at the beginning of each block?
3934 if (PrevLoc.isUnknown())
3935 return 0;
3937 // Record the source line.
3938 setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3940 if (!DW || !DW->ShouldEmitDwarfDebug())
3941 return 0;
3942 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3943 DISubprogram SP(FSI.getSubprogram());
3944 DICompileUnit CU(PrevLocTpl.CompileUnit);
3945 unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
3946 PrevLocTpl.Line,
3947 PrevLocTpl.Col);
3948 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3949 getRoot(), LabelID));
3950 return 0;
3953 // This is the beginning of a new function.
3954 MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3956 if (!DW || !DW->ShouldEmitDwarfDebug())
3957 return 0;
3958 // llvm.dbg.func_start also defines the beginning of the function scope.
3959 DW->RecordRegionStart(FSI.getSubprogram());
3960 return 0;
3962 case Intrinsic::dbg_declare: {
3963 if (OptLevel != CodeGenOpt::None)
3964 // FIXME: Variable debug info is not supported here.
3965 return 0;
3966 DwarfWriter *DW = DAG.getDwarfWriter();
3967 if (!DW)
3968 return 0;
3969 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3970 if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3971 return 0;
3973 Value *Variable = DI.getVariable();
3974 Value *Address = DI.getAddress();
3975 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3976 Address = BCI->getOperand(0);
3977 AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3978 // Don't handle byval struct arguments or VLAs, for example.
3979 if (!AI)
3980 return 0;
3981 DenseMap<const AllocaInst*, int>::iterator SI =
3982 FuncInfo.StaticAllocaMap.find(AI);
3983 if (SI == FuncInfo.StaticAllocaMap.end())
3984 return 0; // VLAs.
3985 int FI = SI->second;
3986 DW->RecordVariable(cast<MDNode>(Variable), FI);
3987 return 0;
3989 case Intrinsic::eh_exception: {
3990 // Insert the EXCEPTIONADDR instruction.
3991 assert(CurMBB->isLandingPad() && "Call to eh.exception not in landing pad!");
3992 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3993 SDValue Ops[1];
3994 Ops[0] = DAG.getRoot();
3995 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3996 setValue(&I, Op);
3997 DAG.setRoot(Op.getValue(1));
3998 return 0;
4001 case Intrinsic::eh_selector_i32:
4002 case Intrinsic::eh_selector_i64: {
4003 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4004 EVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ? MVT::i32 : MVT::i64);
4006 if (MMI) {
4007 if (CurMBB->isLandingPad())
4008 AddCatchInfo(I, MMI, CurMBB);
4009 else {
4010 #ifndef NDEBUG
4011 FuncInfo.CatchInfoLost.insert(&I);
4012 #endif
4013 // FIXME: Mark exception selector register as live in. Hack for PR1508.
4014 unsigned Reg = TLI.getExceptionSelectorRegister();
4015 if (Reg) CurMBB->addLiveIn(Reg);
4018 // Insert the EHSELECTION instruction.
4019 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
4020 SDValue Ops[2];
4021 Ops[0] = getValue(I.getOperand(1));
4022 Ops[1] = getRoot();
4023 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4024 setValue(&I, Op);
4025 DAG.setRoot(Op.getValue(1));
4026 } else {
4027 setValue(&I, DAG.getConstant(0, VT));
4030 return 0;
4033 case Intrinsic::eh_typeid_for_i32:
4034 case Intrinsic::eh_typeid_for_i64: {
4035 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4036 EVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
4037 MVT::i32 : MVT::i64);
4039 if (MMI) {
4040 // Find the type id for the given typeinfo.
4041 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4043 unsigned TypeID = MMI->getTypeIDFor(GV);
4044 setValue(&I, DAG.getConstant(TypeID, VT));
4045 } else {
4046 // Return something different from eh_selector.
4047 setValue(&I, DAG.getConstant(1, VT));
4050 return 0;
4053 case Intrinsic::eh_return_i32:
4054 case Intrinsic::eh_return_i64:
4055 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4056 MMI->setCallsEHReturn(true);
4057 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4058 MVT::Other,
4059 getControlRoot(),
4060 getValue(I.getOperand(1)),
4061 getValue(I.getOperand(2))));
4062 } else {
4063 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4066 return 0;
4067 case Intrinsic::eh_unwind_init:
4068 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4069 MMI->setCallsUnwindInit(true);
4072 return 0;
4074 case Intrinsic::eh_dwarf_cfa: {
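// Illustrative note: the CFA is materialized below as
//   FRAMEADDR(0) + FRAME_TO_ARGS_OFFSET + <offset operand>,
// i.e. the frame pointer adjusted by the target-specific distance to the
// incoming-argument area, plus the intrinsic's offset argument.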
4075 EVT VT = getValue(I.getOperand(1)).getValueType();
4076 SDValue CfaArg;
4077 if (VT.bitsGT(TLI.getPointerTy()))
4078 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4079 TLI.getPointerTy(), getValue(I.getOperand(1)));
4080 else
4081 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4082 TLI.getPointerTy(), getValue(I.getOperand(1)));
4084 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4085 TLI.getPointerTy(),
4086 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4087 TLI.getPointerTy()),
4088 CfaArg);
4089 setValue(&I, DAG.getNode(ISD::ADD, dl,
4090 TLI.getPointerTy(),
4091 DAG.getNode(ISD::FRAMEADDR, dl,
4092 TLI.getPointerTy(),
4093 DAG.getConstant(0,
4094 TLI.getPointerTy())),
4095 Offset));
4096 return 0;
4098 case Intrinsic::convertff:
4099 case Intrinsic::convertfsi:
4100 case Intrinsic::convertfui:
4101 case Intrinsic::convertsif:
4102 case Intrinsic::convertuif:
4103 case Intrinsic::convertss:
4104 case Intrinsic::convertsu:
4105 case Intrinsic::convertus:
4106 case Intrinsic::convertuu: {
4107 ISD::CvtCode Code = ISD::CVT_INVALID;
4108 switch (Intrinsic) {
4109 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4110 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4111 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4112 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4113 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4114 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4115 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4116 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4117 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4119 EVT DestVT = TLI.getValueType(I.getType());
4120 Value* Op1 = I.getOperand(1);
4121 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4122 DAG.getValueType(DestVT),
4123 DAG.getValueType(getValue(Op1).getValueType()),
4124 getValue(I.getOperand(2)),
4125 getValue(I.getOperand(3)),
4126 Code));
4127 return 0;
4130 case Intrinsic::sqrt:
4131 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4132 getValue(I.getOperand(1)).getValueType(),
4133 getValue(I.getOperand(1))));
4134 return 0;
4135 case Intrinsic::powi:
4136 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4137 getValue(I.getOperand(1)).getValueType(),
4138 getValue(I.getOperand(1)),
4139 getValue(I.getOperand(2))));
4140 return 0;
4141 case Intrinsic::sin:
4142 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4143 getValue(I.getOperand(1)).getValueType(),
4144 getValue(I.getOperand(1))));
4145 return 0;
4146 case Intrinsic::cos:
4147 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4148 getValue(I.getOperand(1)).getValueType(),
4149 getValue(I.getOperand(1))));
4150 return 0;
4151 case Intrinsic::log:
4152 visitLog(I);
4153 return 0;
4154 case Intrinsic::log2:
4155 visitLog2(I);
4156 return 0;
4157 case Intrinsic::log10:
4158 visitLog10(I);
4159 return 0;
4160 case Intrinsic::exp:
4161 visitExp(I);
4162 return 0;
4163 case Intrinsic::exp2:
4164 visitExp2(I);
4165 return 0;
4166 case Intrinsic::pow:
4167 visitPow(I);
4168 return 0;
4169 case Intrinsic::pcmarker: {
4170 SDValue Tmp = getValue(I.getOperand(1));
4171 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4172 return 0;
4174 case Intrinsic::readcyclecounter: {
4175 SDValue Op = getRoot();
4176 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4177 DAG.getVTList(MVT::i64, MVT::Other),
4178 &Op, 1);
4179 setValue(&I, Tmp);
4180 DAG.setRoot(Tmp.getValue(1));
4181 return 0;
4183 case Intrinsic::bswap:
4184 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4185 getValue(I.getOperand(1)).getValueType(),
4186 getValue(I.getOperand(1))));
4187 return 0;
4188 case Intrinsic::cttz: {
4189 SDValue Arg = getValue(I.getOperand(1));
4190 EVT Ty = Arg.getValueType();
4191 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4192 setValue(&I, result);
4193 return 0;
4195 case Intrinsic::ctlz: {
4196 SDValue Arg = getValue(I.getOperand(1));
4197 EVT Ty = Arg.getValueType();
4198 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4199 setValue(&I, result);
4200 return 0;
4202 case Intrinsic::ctpop: {
4203 SDValue Arg = getValue(I.getOperand(1));
4204 EVT Ty = Arg.getValueType();
4205 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4206 setValue(&I, result);
4207 return 0;
4209 case Intrinsic::stacksave: {
4210 SDValue Op = getRoot();
4211 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4212 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4213 setValue(&I, Tmp);
4214 DAG.setRoot(Tmp.getValue(1));
4215 return 0;
4217 case Intrinsic::stackrestore: {
4218 SDValue Tmp = getValue(I.getOperand(1));
4219 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4220 return 0;
4222 case Intrinsic::stackprotector: {
4223 // Emit code into the DAG to store the stack guard onto the stack.
4224 MachineFunction &MF = DAG.getMachineFunction();
4225 MachineFrameInfo *MFI = MF.getFrameInfo();
4226 EVT PtrTy = TLI.getPointerTy();
4228 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4229 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4231 int FI = FuncInfo.StaticAllocaMap[Slot];
4232 MFI->setStackProtectorIndex(FI);
4234 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4236 // Store the stack protector onto the stack.
4237 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4238 PseudoSourceValue::getFixedStack(FI),
4239 0, true);
4240 setValue(&I, Result);
4241 DAG.setRoot(Result);
4242 return 0;
4244 case Intrinsic::var_annotation:
4245 // Discard annotate attributes
4246 return 0;
4248 case Intrinsic::init_trampoline: {
4249 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4251 SDValue Ops[6];
4252 Ops[0] = getRoot();
4253 Ops[1] = getValue(I.getOperand(1));
4254 Ops[2] = getValue(I.getOperand(2));
4255 Ops[3] = getValue(I.getOperand(3));
4256 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4257 Ops[5] = DAG.getSrcValue(F);
4259 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4260 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4261 Ops, 6);
4263 setValue(&I, Tmp);
4264 DAG.setRoot(Tmp.getValue(1));
4265 return 0;
4268 case Intrinsic::gcroot:
4269 if (GFI) {
4270 Value *Alloca = I.getOperand(1);
4271 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4273 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4274 GFI->addStackRoot(FI->getIndex(), TypeMap);
4276 return 0;
4278 case Intrinsic::gcread:
4279 case Intrinsic::gcwrite:
4280 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4281 return 0;
4283 case Intrinsic::flt_rounds: {
4284 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4285 return 0;
4288 case Intrinsic::trap: {
4289 DAG.setRoot(DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot()));
4290 return 0;
4293 case Intrinsic::uadd_with_overflow:
4294 return implVisitAluOverflow(I, ISD::UADDO);
4295 case Intrinsic::sadd_with_overflow:
4296 return implVisitAluOverflow(I, ISD::SADDO);
4297 case Intrinsic::usub_with_overflow:
4298 return implVisitAluOverflow(I, ISD::USUBO);
4299 case Intrinsic::ssub_with_overflow:
4300 return implVisitAluOverflow(I, ISD::SSUBO);
4301 case Intrinsic::umul_with_overflow:
4302 return implVisitAluOverflow(I, ISD::UMULO);
4303 case Intrinsic::smul_with_overflow:
4304 return implVisitAluOverflow(I, ISD::SMULO);
4306 case Intrinsic::prefetch: {
4307 SDValue Ops[4];
4308 Ops[0] = getRoot();
4309 Ops[1] = getValue(I.getOperand(1));
4310 Ops[2] = getValue(I.getOperand(2));
4311 Ops[3] = getValue(I.getOperand(3));
4312 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4313 return 0;
4316 case Intrinsic::memory_barrier: {
4317 SDValue Ops[6];
4318 Ops[0] = getRoot();
4319 for (int x = 1; x < 6; ++x)
4320 Ops[x] = getValue(I.getOperand(x));
4322 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4323 return 0;
4325 case Intrinsic::atomic_cmp_swap: {
4326 SDValue Root = getRoot();
4327 SDValue L =
4328 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4329 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4330 Root,
4331 getValue(I.getOperand(1)),
4332 getValue(I.getOperand(2)),
4333 getValue(I.getOperand(3)),
4334 I.getOperand(1));
4335 setValue(&I, L);
4336 DAG.setRoot(L.getValue(1));
4337 return 0;
4339 case Intrinsic::atomic_load_add:
4340 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4341 case Intrinsic::atomic_load_sub:
4342 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4343 case Intrinsic::atomic_load_or:
4344 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4345 case Intrinsic::atomic_load_xor:
4346 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4347 case Intrinsic::atomic_load_and:
4348 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4349 case Intrinsic::atomic_load_nand:
4350 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4351 case Intrinsic::atomic_load_max:
4352 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4353 case Intrinsic::atomic_load_min:
4354 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4355 case Intrinsic::atomic_load_umin:
4356 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4357 case Intrinsic::atomic_load_umax:
4358 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4359 case Intrinsic::atomic_swap:
4360 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4361 }
4362 }
4364 /// Test if the given instruction is in a position to be optimized
4365 /// with a tail call. This roughly means that it's in a block with
4366 /// a return and there's nothing that needs to be scheduled
4367 /// between it and the return.
4369 /// This function only tests target-independent requirements.
4370 /// For target-dependent requirements, a target should override
4371 /// TargetLowering::IsEligibleForTailCallOptimization.
4373 static bool
4374 isInTailCallPosition(const Instruction *I, Attributes RetAttr,
4375 const TargetLowering &TLI) {
4376 const BasicBlock *ExitBB = I->getParent();
4377 const TerminatorInst *Term = ExitBB->getTerminator();
4378 const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4379 const Function *F = ExitBB->getParent();
4381 // The block must end in a return statement or an unreachable.
4382 if (!Ret && !isa<UnreachableInst>(Term)) return false;
4384 // If I will have a chain, make sure no other instruction that will have a
4385 // chain interposes between I and the return.
4386 if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4387 !I->isSafeToSpeculativelyExecute())
4388 for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4389 --BBI) {
4390 if (&*BBI == I)
4391 break;
4392 if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4393 !BBI->isSafeToSpeculativelyExecute())
4394 return false;
4397 // If the block ends with a void return or unreachable, it doesn't matter
4398 // what the call's return type is.
4399 if (!Ret || Ret->getNumOperands() == 0) return true;
4401 // Conservatively require the attributes of the call to match those of
4402 // the return.
4403 if (F->getAttributes().getRetAttributes() != RetAttr)
4404 return false;
4406 // Otherwise, make sure the unmodified return value of I is the return value.
4407 for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4408 U = dyn_cast<Instruction>(U->getOperand(0))) {
4409 if (!U)
4410 return false;
4411 if (!U->hasOneUse())
4412 return false;
4413 if (U == I)
4414 break;
4415 // Check for a truly no-op truncate.
4416 if (isa<TruncInst>(U) &&
4417 TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4418 continue;
4419 // Check for a truly no-op bitcast.
4420 if (isa<BitCastInst>(U) &&
4421 (U->getOperand(0)->getType() == U->getType() ||
4422 (isa<PointerType>(U->getOperand(0)->getType()) &&
4423 isa<PointerType>(U->getType()))))
4424 continue;
4425 // Otherwise it's not a true no-op.
4426 return false;
4429 return true;
4430 }
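// Illustrative example (hypothetical IR, not from the source): in
//   %t = call i32 @f()
//   ret i32 %t
// the call is in tail-call position; the loop above also tolerates a
// genuinely no-op truncate or bitcast between the call and the return.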
4432 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4433 bool isTailCall,
4434 MachineBasicBlock *LandingPad) {
4435 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4436 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4437 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4438 unsigned BeginLabel = 0, EndLabel = 0;
4440 TargetLowering::ArgListTy Args;
4441 TargetLowering::ArgListEntry Entry;
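// Illustrative note: attribute indices below are 1-based; index 0 names
// the return value, which is why the LowerCallTo call later queries
// CS.paramHasAttr(0, ...) for sext/zext/inreg of the result.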
4442 Args.reserve(CS.arg_size());
4443 unsigned j = 1;
4444 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4445 i != e; ++i, ++j) {
4446 SDValue ArgNode = getValue(*i);
4447 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4449 unsigned attrInd = i - CS.arg_begin() + 1;
4450 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4451 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4452 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4453 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4454 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4455 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4456 Entry.Alignment = CS.getParamAlignment(attrInd);
4457 Args.push_back(Entry);
4460 if (LandingPad && MMI) {
4461 // Insert a label before the invoke call to mark the try range. This can be
4462 // used to detect deletion of the invoke via the MachineModuleInfo.
4463 BeginLabel = MMI->NextLabelID();
4465 // Both PendingLoads and PendingExports must be flushed here;
4466 // this call might not return.
4467 (void)getRoot();
4468 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4469 getControlRoot(), BeginLabel));
4472 // Check if target-independent constraints permit a tail call here.
4473 // Target-dependent constraints are checked within TLI.LowerCallTo.
4474 if (isTailCall &&
4475 !isInTailCallPosition(CS.getInstruction(),
4476 CS.getAttributes().getRetAttributes(),
4477 TLI))
4478 isTailCall = false;
4480 std::pair<SDValue,SDValue> Result =
4481 TLI.LowerCallTo(getRoot(), CS.getType(),
4482 CS.paramHasAttr(0, Attribute::SExt),
4483 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4484 CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4485 CS.getCallingConv(),
4486 isTailCall,
4487 !CS.getInstruction()->use_empty(),
4488 Callee, Args, DAG, getCurDebugLoc());
4489 assert((isTailCall || Result.second.getNode()) &&
4490 "Non-null chain expected with non-tail call!");
4491 assert((Result.second.getNode() || !Result.first.getNode()) &&
4492 "Null value expected with tail call!");
4493 if (Result.first.getNode())
4494 setValue(CS.getInstruction(), Result.first);
4495 // As a special case, a null chain means that a tail call has
4496 // been emitted and the DAG root is already updated.
4497 if (Result.second.getNode())
4498 DAG.setRoot(Result.second);
4499 else
4500 HasTailCall = true;
4502 if (LandingPad && MMI) {
4503 // Insert a label at the end of the invoke call to mark the try range. This
4504 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4505 EndLabel = MMI->NextLabelID();
4506 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4507 getRoot(), EndLabel));
4509 // Inform MachineModuleInfo of range.
4510 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4511 }
4512 }
4515 void SelectionDAGLowering::visitCall(CallInst &I) {
4516 const char *RenameFn = 0;
4517 if (Function *F = I.getCalledFunction()) {
4518 if (F->isDeclaration()) {
4519 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4520 if (II) {
4521 if (unsigned IID = II->getIntrinsicID(F)) {
4522 RenameFn = visitIntrinsicCall(I, IID);
4523 if (!RenameFn)
4524 return;
4527 if (unsigned IID = F->getIntrinsicID()) {
4528 RenameFn = visitIntrinsicCall(I, IID);
4529 if (!RenameFn)
4530 return;
4534 // Check for well-known libc/libm calls. If the function is internal, it
4535 // can't be a library call.
4536 if (!F->hasLocalLinkage() && F->hasName()) {
4537 StringRef Name = F->getName();
4538 if (Name == "copysign" || Name == "copysignf") {
4539 if (I.getNumOperands() == 3 && // Basic sanity checks.
4540 I.getOperand(1)->getType()->isFloatingPoint() &&
4541 I.getType() == I.getOperand(1)->getType() &&
4542 I.getType() == I.getOperand(2)->getType()) {
4543 SDValue LHS = getValue(I.getOperand(1));
4544 SDValue RHS = getValue(I.getOperand(2));
4545 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4546 LHS.getValueType(), LHS, RHS));
4547 return;
4549 } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4550 if (I.getNumOperands() == 2 && // Basic sanity checks.
4551 I.getOperand(1)->getType()->isFloatingPoint() &&
4552 I.getType() == I.getOperand(1)->getType()) {
4553 SDValue Tmp = getValue(I.getOperand(1));
4554 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4555 Tmp.getValueType(), Tmp));
4556 return;
4558 } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4559 if (I.getNumOperands() == 2 && // Basic sanity checks.
4560 I.getOperand(1)->getType()->isFloatingPoint() &&
4561 I.getType() == I.getOperand(1)->getType()) {
4562 SDValue Tmp = getValue(I.getOperand(1));
4563 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4564 Tmp.getValueType(), Tmp));
4565 return;
4567 } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4568 if (I.getNumOperands() == 2 && // Basic sanity checks.
4569 I.getOperand(1)->getType()->isFloatingPoint() &&
4570 I.getType() == I.getOperand(1)->getType()) {
4571 SDValue Tmp = getValue(I.getOperand(1));
4572 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4573 Tmp.getValueType(), Tmp));
4574 return;
4578 } else if (isa<InlineAsm>(I.getOperand(0))) {
4579 visitInlineAsm(&I);
4580 return;
4583 SDValue Callee;
4584 if (!RenameFn)
4585 Callee = getValue(I.getOperand(0));
4586 else
4587 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4589 // Check if we can potentially perform a tail call. More detailed
4590 // checking is done within LowerCallTo, after more information
4591 // about the call is known.
4592 bool isTailCall = PerformTailCallOpt && I.isTailCall();
4594 LowerCallTo(&I, Callee, isTailCall);
4595 }
4598 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
4599 /// this value, returning the result as a ValueVT value. This uses
4600 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4601 /// If the Flag pointer is NULL, no flag is used.
4602 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4603 SDValue &Chain,
4604 SDValue *Flag) const {
4605 // Assemble the legal parts into the final values.
4606 SmallVector<SDValue, 4> Values(ValueVTs.size());
4607 SmallVector<SDValue, 8> Parts;
4608 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4609 // Copy the legal parts from the registers.
4610 EVT ValueVT = ValueVTs[Value];
4611 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4612 EVT RegisterVT = RegVTs[Value];
4614 Parts.resize(NumRegs);
4615 for (unsigned i = 0; i != NumRegs; ++i) {
4616 SDValue P;
4617 if (Flag == 0)
4618 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4619 else {
4620 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4621 *Flag = P.getValue(2);
4623 Chain = P.getValue(1);
4625 // If the source register was virtual and if we know something about it,
4626 // add an assert node.
4627 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4628 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4629 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4630 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4631 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4632 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4634 unsigned RegSize = RegisterVT.getSizeInBits();
4635 unsigned NumSignBits = LOI.NumSignBits;
4636 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4638 // FIXME: We capture more information than the dag can represent. For
4639 // now, just use the tightest assertzext/assertsext possible.
4640 bool isSExt = true;
4641 EVT FromVT(MVT::Other);
4642 if (NumSignBits == RegSize)
4643 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4644 else if (NumZeroBits >= RegSize-1)
4645 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4646 else if (NumSignBits > RegSize-8)
4647 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4648 else if (NumZeroBits >= RegSize-8)
4649 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4650 else if (NumSignBits > RegSize-16)
4651 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4652 else if (NumZeroBits >= RegSize-16)
4653 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4654 else if (NumSignBits > RegSize-32)
4655 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4656 else if (NumZeroBits >= RegSize-32)
4657 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4659 if (FromVT != MVT::Other) {
4660 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4661 RegisterVT, P, DAG.getValueType(FromVT));
4662 }
4663 }
4664 }
4667 Parts[i] = P;
4668 }
4670 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4671 NumRegs, RegisterVT, ValueVT);
4672 Part += NumRegs;
4673 Parts.clear();
4674 }
4676 return DAG.getNode(ISD::MERGE_VALUES, dl,
4677 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4678 &Values[0], ValueVTs.size());
4679 }
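// A rough worked example of the assert-node tightening above (hypothetical
// numbers, not tied to any particular target): for a virtual register with
// RegisterVT = MVT::i32 (RegSize = 32) whose live-out info recorded
// NumSignBits = 26, the chain picks NumSignBits > RegSize-8, so the copy is
// wrapped as
//   P = DAG.getNode(ISD::AssertSext, dl, MVT::i32, P,
//                   DAG.getValueType(MVT::i8));
// whereas NumZeroBits = 20 (with few sign bits) would instead produce an
// AssertZext from MVT::i16.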
4681 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4682 /// specified value into the registers specified by this object. This uses
4683 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4684 /// If the Flag pointer is NULL, no flag is used.
4685 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4686 SDValue &Chain, SDValue *Flag) const {
4687 // Get the list of the value's legal parts.
4688 unsigned NumRegs = Regs.size();
4689 SmallVector<SDValue, 8> Parts(NumRegs);
4690 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4691 EVT ValueVT = ValueVTs[Value];
4692 unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4693 EVT RegisterVT = RegVTs[Value];
4695 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4696 &Parts[Part], NumParts, RegisterVT);
4697 Part += NumParts;
4698 }
4700 // Copy the parts into the registers.
4701 SmallVector<SDValue, 8> Chains(NumRegs);
4702 for (unsigned i = 0; i != NumRegs; ++i) {
4703 SDValue Part;
4704 if (Flag == 0)
4705 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4706 else {
4707 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4708 *Flag = Part.getValue(1);
4709 }
4710 Chains[i] = Part.getValue(0);
4711 }
4713 if (NumRegs == 1 || Flag)
4714 // If NumRegs > 1 && Flag is used, then the use of the last CopyToReg is
4715 // flagged to it. That is, the CopyToReg nodes and the user are considered
4716 // a single scheduling unit. If we create a TokenFactor and return it as
4717 // chain, then the TokenFactor is both a predecessor (operand) of the
4718 // user as well as a successor (the TF operands are flagged to the user).
4719 // c1, f1 = CopyToReg
4720 // c2, f2 = CopyToReg
4721 // c3 = TokenFactor c1, c2
4722 // ...
4723 // = op c3, ..., f2
4724 Chain = Chains[NumRegs-1];
4725 else
4726 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4727 }
4729 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4730 /// operand list. This adds the code marker and includes the number of
4731 /// values added into it.
4732 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4733 bool HasMatching,unsigned MatchingIdx,
4734 SelectionDAG &DAG,
4735 std::vector<SDValue> &Ops) const {
4736 EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4737 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4738 unsigned Flag = Code | (Regs.size() << 3);
4739 if (HasMatching)
4740 Flag |= 0x80000000 | (MatchingIdx << 16);
4741 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4742 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4743 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4744 EVT RegisterVT = RegVTs[Value];
4745 for (unsigned i = 0; i != NumRegs; ++i) {
4746 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4747 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4748 }
4749 }
4750 }
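// A small worked example of the flag word built above, using this file's
// encoding (bits 0-2 = operand code, bits 3-15 = register count, and, for
// tied operands, bit 31 set plus the matched operand index in bits 16-30):
// a REGDEF (code 2) covering two registers encodes as 2 | (2 << 3) == 0x12,
// and the same operand tied to operand 1 would become 0x80010012.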
4752 /// isAllocatableRegister - If the specified register is safe to allocate,
4753 /// i.e. it isn't a stack pointer or some other special register, return the
4754 /// register class for the register. Otherwise, return null.
4755 static const TargetRegisterClass *
4756 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4757 const TargetLowering &TLI,
4758 const TargetRegisterInfo *TRI) {
4759 EVT FoundVT = MVT::Other;
4760 const TargetRegisterClass *FoundRC = 0;
4761 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4762 E = TRI->regclass_end(); RCI != E; ++RCI) {
4763 EVT ThisVT = MVT::Other;
4765 const TargetRegisterClass *RC = *RCI;
4766 // If none of the value types for this register class are valid, we
4767 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4768 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4769 I != E; ++I) {
4770 if (TLI.isTypeLegal(*I)) {
4771 // If we have already found this register in a different register class,
4772 // choose the one with the largest VT specified. For example, on
4773 // PowerPC, we favor f64 register classes over f32.
4774 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4775 ThisVT = *I;
4776 break;
4777 }
4778 }
4779 }
4781 if (ThisVT == MVT::Other) continue;
4783 // NOTE: This isn't ideal. In particular, this might allocate the
4784 // frame pointer in functions that need it (due to them not being taken
4785 // out of allocation, because a variable sized allocation hasn't been seen
4786 // yet). This is a slight code pessimization, but should still work.
4787 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4788 E = RC->allocation_order_end(MF); I != E; ++I)
4789 if (*I == Reg) {
4790 // We found a matching register class. Keep looking at others in case
4791 // we find one with larger registers that this physreg is also in.
4792 FoundRC = RC;
4793 FoundVT = ThisVT;
4794 break;
4795 }
4796 }
4797 return FoundRC;
4798 }
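// Informal example of the largest-VT preference above: on a hypothetical
// target whose f32 and f64 register classes contain the same physical
// registers, querying one of those registers returns the f64 class, since
// the f64 class is visited with a strictly larger legal value type and so
// overwrites the earlier f32 match.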
4801 namespace llvm {
4802 /// AsmOperandInfo - This contains information for each constraint that we are
4803 /// lowering.
4804 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4805 public TargetLowering::AsmOperandInfo {
4806 public:
4807 /// CallOperand - If this is the result output operand or a clobber
4808 /// this is null, otherwise it is the incoming operand to the CallInst.
4809 /// This gets modified as the asm is processed.
4810 SDValue CallOperand;
4812 /// AssignedRegs - If this is a register or register class operand, this
4813 /// contains the set of registers corresponding to the operand.
4814 RegsForValue AssignedRegs;
4816 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4817 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4818 }
4820 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4821 /// busy in OutputRegs/InputRegs.
4822 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4823 std::set<unsigned> &OutputRegs,
4824 std::set<unsigned> &InputRegs,
4825 const TargetRegisterInfo &TRI) const {
4826 if (isOutReg) {
4827 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4828 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4829 }
4830 if (isInReg) {
4831 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4832 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4833 }
4834 }
4836 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4837 /// corresponds to. If there is no Value* for this operand, it returns
4838 /// MVT::Other.
4839 EVT getCallOperandValEVT(LLVMContext &Context,
4840 const TargetLowering &TLI,
4841 const TargetData *TD) const {
4842 if (CallOperandVal == 0) return MVT::Other;
4844 if (isa<BasicBlock>(CallOperandVal))
4845 return TLI.getPointerTy();
4847 const llvm::Type *OpTy = CallOperandVal->getType();
4849 // If this is an indirect operand, the operand is a pointer to the
4850 // accessed type.
4851 if (isIndirect)
4852 OpTy = cast<PointerType>(OpTy)->getElementType();
4854 // If OpTy is not a single value, it may be a struct/union that we
4855 // can tile with integers.
4856 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4857 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4858 switch (BitSize) {
4859 default: break;
4860 case 1:
4861 case 8:
4862 case 16:
4863 case 32:
4864 case 64:
4865 case 128:
4866 OpTy = IntegerType::get(Context, BitSize);
4867 break;
4868 }
4869 }
4871 return TLI.getValueType(OpTy, true);
4872 }
4874 private:
4875 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4876 /// specified set.
4877 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4878 const TargetRegisterInfo &TRI) {
4879 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4880 Regs.insert(Reg);
4881 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4882 for (; *Aliases; ++Aliases)
4883 Regs.insert(*Aliases);
4884 }
4885 };
4886 } // end llvm namespace.
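// Rough illustration of the struct-tiling logic in getCallOperandValEVT
// above: an indirect operand whose pointee type is {i16, i16} is not a
// single value type, but its 32-bit size hits the "case 32" arm, so OpTy
// becomes i32 and the constraint machinery sees an i32 operand; an oddly
// sized aggregate (say 24 bits) falls through the switch unchanged.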
4889 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4890 /// specified operand. We prefer to assign virtual registers, to allow the
4891 /// register allocator to handle the assignment process. However, if the asm uses
4892 /// features that we can't model on machineinstrs, we have SDISel do the
4893 /// allocation. This produces generally horrible, but correct, code.
4895 /// OpInfo describes the operand.
4896 /// Input and OutputRegs are the set of already allocated physical registers.
4898 void SelectionDAGLowering::
4899 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4900 std::set<unsigned> &OutputRegs,
4901 std::set<unsigned> &InputRegs) {
4902 LLVMContext &Context = FuncInfo.Fn->getContext();
4904 // Compute whether this value requires an input register, an output register,
4905 // or both.
4906 bool isOutReg = false;
4907 bool isInReg = false;
4908 switch (OpInfo.Type) {
4909 case InlineAsm::isOutput:
4910 isOutReg = true;
4912 // If there is an input constraint that matches this, we need to reserve
4913 // the input register so no other inputs allocate to it.
4914 isInReg = OpInfo.hasMatchingInput();
4915 break;
4916 case InlineAsm::isInput:
4917 isInReg = true;
4918 isOutReg = false;
4919 break;
4920 case InlineAsm::isClobber:
4921 isOutReg = true;
4922 isInReg = true;
4923 break;
4924 }
4927 MachineFunction &MF = DAG.getMachineFunction();
4928 SmallVector<unsigned, 4> Regs;
4930 // If this is a constraint for a single physreg, or a constraint for a
4931 // register class, find it.
4932 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4933 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4934 OpInfo.ConstraintVT);
4936 unsigned NumRegs = 1;
4937 if (OpInfo.ConstraintVT != MVT::Other) {
4938 // If this is an FP input in an integer register (or vice versa), insert a bit
4939 // cast of the input value. More generally, handle any case where the input
4940 // value disagrees with the register class we plan to stick this in.
4941 if (OpInfo.Type == InlineAsm::isInput &&
4942 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4943 // Try to convert to the first EVT that the reg class contains. If the
4944 // types are of identical size, use a bitcast to convert (e.g. two differing
4945 // vector types).
4946 EVT RegVT = *PhysReg.second->vt_begin();
4947 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4948 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4949 RegVT, OpInfo.CallOperand);
4950 OpInfo.ConstraintVT = RegVT;
4951 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4952 // If the input is a FP value and we want it in FP registers, do a
4953 // bitcast to the corresponding integer type. This turns an f64 value
4954 // into i64, which can be passed with two i32 values on a 32-bit
4955 // machine.
4956 RegVT = EVT::getIntegerVT(Context,
4957 OpInfo.ConstraintVT.getSizeInBits());
4958 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4959 RegVT, OpInfo.CallOperand);
4960 OpInfo.ConstraintVT = RegVT;
4961 }
4962 }
4964 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
4965 }
4967 EVT RegVT;
4968 EVT ValueVT = OpInfo.ConstraintVT;
4970 // If this is a constraint for a specific physical register, like {r17},
4971 // assign it now.
4972 if (unsigned AssignedReg = PhysReg.first) {
4973 const TargetRegisterClass *RC = PhysReg.second;
4974 if (OpInfo.ConstraintVT == MVT::Other)
4975 ValueVT = *RC->vt_begin();
4977 // Get the actual register value type. This is important, because the user
4978 // may have asked for (e.g.) the AX register in i32 type. We need to
4979 // remember that AX is actually i16 to get the right extension.
4980 RegVT = *RC->vt_begin();
4982 // This is an explicit reference to a physical register.
4983 Regs.push_back(AssignedReg);
4985 // If this is an expanded reference, add the rest of the regs to Regs.
4986 if (NumRegs != 1) {
4987 TargetRegisterClass::iterator I = RC->begin();
4988 for (; *I != AssignedReg; ++I)
4989 assert(I != RC->end() && "Didn't find reg!");
4991 // Already added the first reg.
4992 --NumRegs; ++I;
4993 for (; NumRegs; --NumRegs, ++I) {
4994 assert(I != RC->end() && "Ran out of registers to allocate!");
4995 Regs.push_back(*I);
4996 }
4997 }
4998 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4999 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5000 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5001 return;
5002 }
5004 // Otherwise, if this was a reference to an LLVM register class, create vregs
5005 // for this reference.
5006 if (const TargetRegisterClass *RC = PhysReg.second) {
5007 RegVT = *RC->vt_begin();
5008 if (OpInfo.ConstraintVT == MVT::Other)
5009 ValueVT = RegVT;
5011 // Create the appropriate number of virtual registers.
5012 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5013 for (; NumRegs; --NumRegs)
5014 Regs.push_back(RegInfo.createVirtualRegister(RC));
5016 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5017 return;
5018 }
5020 // This is a reference to a register class that doesn't directly correspond
5021 // to an LLVM register class. Allocate NumRegs consecutive, available,
5022 // registers from the class.
5023 std::vector<unsigned> RegClassRegs
5024 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5025 OpInfo.ConstraintVT);
5027 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5028 unsigned NumAllocated = 0;
5029 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5030 unsigned Reg = RegClassRegs[i];
5031 // See if this register is available.
5032 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
5033 (isInReg && InputRegs.count(Reg))) { // Already used.
5034 // Make sure we find consecutive registers.
5035 NumAllocated = 0;
5036 continue;
5037 }
5039 // Check to see if this register is allocatable (i.e. don't give out the
5040 // stack pointer).
5041 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5042 if (!RC) { // Couldn't allocate this register.
5043 // Reset NumAllocated to make sure we return consecutive registers.
5044 NumAllocated = 0;
5045 continue;
5046 }
5048 // Okay, this register is good, we can use it.
5049 ++NumAllocated;
5051 // If we allocated enough consecutive registers, succeed.
5052 if (NumAllocated == NumRegs) {
5053 unsigned RegStart = (i-NumAllocated)+1;
5054 unsigned RegEnd = i+1;
5055 // Mark all of the allocated registers used.
5056 for (unsigned i = RegStart; i != RegEnd; ++i)
5057 Regs.push_back(RegClassRegs[i]);
5059 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5060 OpInfo.ConstraintVT);
5061 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5062 return;
5063 }
5064 }
5066 // Otherwise, we couldn't allocate enough registers for this.
5067 }
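// Hypothetical walk through the consecutive-allocation scan above: with
// NumRegs = 2 and RegClassRegs = {R0, R1, R2, R3}, where R1 is already in
// InputRegs, R0 bumps NumAllocated to 1, the collision on R1 resets it to
// 0, and R2/R3 then reach NumAllocated == NumRegs, giving RegStart = 2 and
// RegEnd = 4, i.e. the pair {R2, R3} is assigned.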
5069 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5070 /// processed uses a memory 'm' constraint.
5071 static bool
5072 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5073 const TargetLowering &TLI) {
5074 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5075 InlineAsm::ConstraintInfo &CI = CInfos[i];
5076 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5077 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5078 if (CType == TargetLowering::C_Memory)
5079 return true;
5080 }
5082 // Indirect operand accesses access memory.
5083 if (CI.isIndirect)
5084 return true;
5085 }
5087 return false;
5088 }
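// Informally: an asm whose constraints parse to "=r,m" returns true here
// (the "m" code is C_Memory), as does one with an indirect operand such as
// "=*r"; a pure register asm like "=r,r" returns false, which lets
// visitInlineAsm below leave pending loads unflushed for nonvolatile asms.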
5090 /// visitInlineAsm - Handle a call to an InlineAsm object.
5092 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
5093 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5095 /// ConstraintOperands - Information about all of the constraints.
5096 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5098 std::set<unsigned> OutputRegs, InputRegs;
5100 // Do a prepass over the constraints, canonicalizing them, and building up the
5101 // ConstraintOperands list.
5102 std::vector<InlineAsm::ConstraintInfo>
5103 ConstraintInfos = IA->ParseConstraints();
5105 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5107 SDValue Chain, Flag;
5109 // We won't need to flush pending loads if this asm doesn't touch
5110 // memory and is nonvolatile.
5111 if (hasMemory || IA->hasSideEffects())
5112 Chain = getRoot();
5113 else
5114 Chain = DAG.getRoot();
5116 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5117 unsigned ResNo = 0; // ResNo - The result number of the next output.
5118 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5119 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5120 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5122 EVT OpVT = MVT::Other;
5124 // Compute the value type for each operand.
5125 switch (OpInfo.Type) {
5126 case InlineAsm::isOutput:
5127 // Indirect outputs just consume an argument.
5128 if (OpInfo.isIndirect) {
5129 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5130 break;
5131 }
5133 // The return value of the call is this value. As such, there is no
5134 // corresponding argument.
5135 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5136 "Bad inline asm!");
5137 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5138 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5139 } else {
5140 assert(ResNo == 0 && "Asm only has one result!");
5141 OpVT = TLI.getValueType(CS.getType());
5142 }
5143 ++ResNo;
5144 break;
5145 case InlineAsm::isInput:
5146 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5147 break;
5148 case InlineAsm::isClobber:
5149 // Nothing to do.
5150 break;
5151 }
5153 // If this is an input or an indirect output, process the call argument.
5154 // BasicBlocks are labels, currently appearing only in inline asm.
5155 if (OpInfo.CallOperandVal) {
5156 // Strip bitcasts, if any. This mostly comes up for functions.
5157 OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5159 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5160 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5161 } else {
5162 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5163 }
5165 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5166 }
5168 OpInfo.ConstraintVT = OpVT;
5169 }
5171 // Second pass over the constraints: compute which constraint option to use
5172 // and assign registers to constraints that want a specific physreg.
5173 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5174 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5176 // If this is an output operand with a matching input operand, look up the
5177 // matching input. If their types mismatch, e.g. one is an integer, the
5178 // other is floating point, or their sizes are different, flag it as an
5179 // error.
5180 if (OpInfo.hasMatchingInput()) {
5181 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5182 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5183 if ((OpInfo.ConstraintVT.isInteger() !=
5184 Input.ConstraintVT.isInteger()) ||
5185 (OpInfo.ConstraintVT.getSizeInBits() !=
5186 Input.ConstraintVT.getSizeInBits())) {
5187 llvm_report_error("Unsupported asm: input constraint"
5188 " with a matching output constraint of incompatible"
5189 " type!");
5190 }
5191 Input.ConstraintVT = OpInfo.ConstraintVT;
5192 }
5193 }
5195 // Compute the constraint code and ConstraintType to use.
5196 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5198 // If this is a memory input, and if the operand is not indirect, do what we
5199 // need to do to provide an address for the memory input.
5200 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5201 !OpInfo.isIndirect) {
5202 assert(OpInfo.Type == InlineAsm::isInput &&
5203 "Can only indirectify direct input operands!");
5205 // Memory operands really want the address of the value. If we don't have
5206 // an indirect input, put it in the constpool if we can, otherwise spill
5207 // it to a stack slot.
5209 // If the operand is a float, integer, or vector constant, spill to a
5210 // constant pool entry to get its address.
5211 Value *OpVal = OpInfo.CallOperandVal;
5212 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5213 isa<ConstantVector>(OpVal)) {
5214 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5215 TLI.getPointerTy());
5216 } else {
5217 // Otherwise, create a stack slot and emit a store to it before the
5218 // asm.
5219 const Type *Ty = OpVal->getType();
5220 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5221 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5222 MachineFunction &MF = DAG.getMachineFunction();
5223 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5224 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5225 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5226 OpInfo.CallOperand, StackSlot, NULL, 0);
5227 OpInfo.CallOperand = StackSlot;
5228 }
5230 // There is no longer a Value* corresponding to this operand.
5231 OpInfo.CallOperandVal = 0;
5232 // It is now an indirect operand.
5233 OpInfo.isIndirect = true;
5234 }
5236 // If this constraint is for a specific register, allocate it before
5237 // anything else.
5238 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5239 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5240 }
5241 ConstraintInfos.clear();
5244 // Third pass - Loop over all of the operands, assigning virtual or physregs
5245 // to register class operands.
5246 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5247 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5249 // C_Register operands have already been allocated, Other/Memory don't need
5250 // to be.
5251 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5252 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5253 }
5255 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5256 std::vector<SDValue> AsmNodeOperands;
5257 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5258 AsmNodeOperands.push_back(
5259 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5262 // Loop over all of the inputs, copying the operand values into the
5263 // appropriate registers and processing the output regs.
5264 RegsForValue RetValRegs;
5266 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5267 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5269 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5270 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5272 switch (OpInfo.Type) {
5273 case InlineAsm::isOutput: {
5274 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5275 OpInfo.ConstraintType != TargetLowering::C_Register) {
5276 // Memory output, or 'other' output (e.g. 'X' constraint).
5277 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5279 // Add information to the INLINEASM node to know about this output.
5280 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5281 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5282 TLI.getPointerTy()));
5283 AsmNodeOperands.push_back(OpInfo.CallOperand);
5284 break;
5285 }
5287 // Otherwise, this is a register or register class output.
5289 // Copy the output from the appropriate register. Find a register that
5290 // we can use.
5291 if (OpInfo.AssignedRegs.Regs.empty()) {
5292 llvm_report_error("Couldn't allocate output reg for"
5293 " constraint '" + OpInfo.ConstraintCode + "'!");
5294 }
5296 // If this is an indirect operand, store through the pointer after the
5297 // asm.
5298 if (OpInfo.isIndirect) {
5299 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5300 OpInfo.CallOperandVal));
5301 } else {
5302 // This is the result value of the call.
5303 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5304 "Bad inline asm!");
5305 // Concatenate this output onto the outputs list.
5306 RetValRegs.append(OpInfo.AssignedRegs);
5307 }
5309 // Add information to the INLINEASM node to know that this register is
5310 // set.
5311 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5312 6 /* EARLYCLOBBER REGDEF */ :
5313 2 /* REGDEF */ ,
5314 false,
5315 0,
5316 DAG, AsmNodeOperands);
5317 break;
5318 }
5319 case InlineAsm::isInput: {
5320 SDValue InOperandVal = OpInfo.CallOperand;
5322 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5323 // If this is required to match an output register we have already set,
5324 // just use its register.
5325 unsigned OperandNo = OpInfo.getMatchedOperand();
5327 // Scan until we find the definition we already emitted of this operand.
5328 // When we find it, create a RegsForValue operand.
5329 unsigned CurOp = 2; // The first operand.
5330 for (; OperandNo; --OperandNo) {
5331 // Advance to the next operand.
5332 unsigned OpFlag =
5333 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5334 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5335 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5336 (OpFlag & 7) == 4 /*MEM*/) &&
5337 "Skipped past definitions?");
5338 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5339 }
5341 unsigned OpFlag =
5342 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5343 if ((OpFlag & 7) == 2 /*REGDEF*/
5344 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5345 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5346 if (OpInfo.isIndirect) {
5347 llvm_report_error("Don't know how to handle tied indirect "
5348 "register inputs yet!");
5349 }
5350 RegsForValue MatchedRegs;
5351 MatchedRegs.TLI = &TLI;
5352 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5353 EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5354 MatchedRegs.RegVTs.push_back(RegVT);
5355 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5356 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5357 i != e; ++i)
5358 MatchedRegs.Regs.
5359 push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5361 // Use the produced MatchedRegs object to copy the input value into the vregs.
5362 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5363 Chain, &Flag);
5364 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5365 true, OpInfo.getMatchedOperand(),
5366 DAG, AsmNodeOperands);
5367 break;
5368 } else {
5369 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5370 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5371 "Unexpected number of operands");
5372 // Add information to the INLINEASM node to know about this input.
5373 // See InlineAsm.h isUseOperandTiedToDef.
5374 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5375 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5376 TLI.getPointerTy()));
5377 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5378 break;
5379 }
5380 }
5382 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5383 assert(!OpInfo.isIndirect &&
5384 "Don't know how to handle indirect other inputs yet!");
5386 std::vector<SDValue> Ops;
5387 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5388 hasMemory, Ops, DAG);
5389 if (Ops.empty()) {
5390 llvm_report_error("Invalid operand for inline asm"
5391 " constraint '" + OpInfo.ConstraintCode + "'!");
5392 }
5394 // Add information to the INLINEASM node to know about this input.
5395 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5396 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5397 TLI.getPointerTy()));
5398 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5399 break;
5400 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5401 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5402 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5403 "Memory operands expect pointer values");
5405 // Add information to the INLINEASM node to know about this input.
5406 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5407 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5408 TLI.getPointerTy()));
5409 AsmNodeOperands.push_back(InOperandVal);
5410 break;
5411 }
5413 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5414 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5415 "Unknown constraint type!");
5416 assert(!OpInfo.isIndirect &&
5417 "Don't know how to handle indirect register inputs yet!");
5419 // Copy the input into the appropriate registers.
5420 if (OpInfo.AssignedRegs.Regs.empty()) {
5421 llvm_report_error("Couldn't allocate input reg for"
5422 " constraint '"+ OpInfo.ConstraintCode +"'!");
5423 }
5425 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5426 Chain, &Flag);
5428 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5429 DAG, AsmNodeOperands);
5430 break;
5431 }
5432 case InlineAsm::isClobber: {
5433 // Add the clobbered value to the operand list, so that the register
5434 // allocator is aware that the physreg got clobbered.
5435 if (!OpInfo.AssignedRegs.Regs.empty())
5436 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5437 false, 0, DAG, AsmNodeOperands);
5438 break;
5439 }
5440 }
5441 }
5443 // Finish up input operands.
5444 AsmNodeOperands[0] = Chain;
5445 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5447 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5448 DAG.getVTList(MVT::Other, MVT::Flag),
5449 &AsmNodeOperands[0], AsmNodeOperands.size());
5450 Flag = Chain.getValue(1);
5452 // If this asm returns a register value, copy the result from that register
5453 // and set it as the value of the call.
5454 if (!RetValRegs.Regs.empty()) {
5455 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5456 Chain, &Flag);
5458 // FIXME: Why don't we do this for inline asms with MRVs?
5459 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5460 EVT ResultType = TLI.getValueType(CS.getType());
5462 // If any of the results of the inline asm is a vector, it may have the
5463 // wrong width/num elts. This can happen for register classes that can
5464 // contain multiple different value types. The preg or vreg allocated may
5465 // not have the same VT as was expected. Convert it to the right type
5466 // with bit_convert.
5467 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5468 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5469 ResultType, Val);
5471 } else if (ResultType != Val.getValueType() &&
5472 ResultType.isInteger() && Val.getValueType().isInteger()) {
5473 // If a result value was tied to an input value, the computed result may
5474 // have a wider width than the expected result. Extract the relevant
5475 // portion.
5476 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5477 }
5479 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5480 }
5482 setValue(CS.getInstruction(), Val);
5483 // Don't need to use this as a chain in this case.
5484 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5485 return;
5486 }
5488 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5490 // Process indirect outputs, first output all of the flagged copies out of
5491 // physregs.
5492 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5493 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5494 Value *Ptr = IndirectStoresToEmit[i].second;
5495 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5496 Chain, &Flag);
5497 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5498 }
5501 // Emit the non-flagged stores from the physregs.
5502 SmallVector<SDValue, 8> OutChains;
5503 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5504 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5505 StoresToEmit[i].first,
5506 getValue(StoresToEmit[i].second),
5507 StoresToEmit[i].second, 0));
5508 if (!OutChains.empty())
5509 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5510 &OutChains[0], OutChains.size());
5511 DAG.setRoot(Chain);
5512 }
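// End-to-end sketch (hypothetical IR, not from a testcase): for
//   %v = call i32 asm "foo $0", "=r"()
// the passes above allocate a virtual register for the "=r" output, build
// an INLINEASM node whose operands are the chain, the asm-string symbol,
// and a REGDEF flag word followed by that register, and finally copy the
// result back out of the register as the value of %v.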
5515 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5516 SDValue Src = getValue(I.getOperand(0));
5518 // Scale up by the type size in the original i32 type width. Various
5519 // mid-level optimizers may make assumptions about demanded bits etc from the
5520 // i32-ness of the multiply: we do not want to promote to i64 and then
5521 // multiply on 64-bit targets.
5522 // FIXME: Malloc inst should go away: PR715.
5523 uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
5524 if (ElementSize != 1) {
5525 // Src is always 32-bits, make sure the constant fits.
5526 assert(Src.getValueType() == MVT::i32);
5527 ElementSize = (uint32_t)ElementSize;
5528 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5529 Src, DAG.getConstant(ElementSize, Src.getValueType()));
5530 }
5532 EVT IntPtr = TLI.getPointerTy();
5534 if (IntPtr.bitsLT(Src.getValueType()))
5535 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5536 else if (IntPtr.bitsGT(Src.getValueType()))
5537 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5539 TargetLowering::ArgListTy Args;
5540 TargetLowering::ArgListEntry Entry;
5541 Entry.Node = Src;
5542 Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
5543 Args.push_back(Entry);
5545 bool isTailCall = PerformTailCallOpt &&
5546 isInTailCallPosition(&I, Attribute::None, TLI);
5547 std::pair<SDValue,SDValue> Result =
5548 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5549 0, CallingConv::C, isTailCall,
5550 /*isReturnValueUsed=*/true,
5551 DAG.getExternalSymbol("malloc", IntPtr),
5552 Args, DAG, getCurDebugLoc());
5553 if (Result.first.getNode())
5554 setValue(&I, Result.first); // Pointers always fit in registers
5555 if (Result.second.getNode())
5556 DAG.setRoot(Result.second);
5557 }
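// Sketch of the net effect, assuming a 64-bit target and a 4-byte element
// type: for
//   %p = malloc i32, i32 %n
// Src starts as the i32 %n, is multiplied by the constant 4 while still
// i32, is zero-extended to the i64 pointer width, and is then passed to an
// ordinary "malloc" call emitted through LowerCallTo.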
5559 void SelectionDAGLowering::visitFree(FreeInst &I) {
5560 TargetLowering::ArgListTy Args;
5561 TargetLowering::ArgListEntry Entry;
5562 Entry.Node = getValue(I.getOperand(0));
5563 Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
5564 Args.push_back(Entry);
5565 EVT IntPtr = TLI.getPointerTy();
5566 bool isTailCall = PerformTailCallOpt &&
5567 isInTailCallPosition(&I, Attribute::None, TLI);
5568 std::pair<SDValue,SDValue> Result =
5569 TLI.LowerCallTo(getRoot(), Type::getVoidTy(*DAG.getContext()),
5570 false, false, false, false,
5571 0, CallingConv::C, isTailCall,
5572 /*isReturnValueUsed=*/true,
5573 DAG.getExternalSymbol("free", IntPtr), Args, DAG,
5574 getCurDebugLoc());
5575 if (Result.second.getNode())
5576 DAG.setRoot(Result.second);
5579 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5580 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5581 MVT::Other, getRoot(),
5582 getValue(I.getOperand(1)),
5583 DAG.getSrcValue(I.getOperand(1))));
5586 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5587 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5588 getRoot(), getValue(I.getOperand(0)),
5589 DAG.getSrcValue(I.getOperand(0)));
5590 setValue(&I, V);
5591 DAG.setRoot(V.getValue(1));
5594 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5595 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5596 MVT::Other, getRoot(),
5597 getValue(I.getOperand(1)),
5598 DAG.getSrcValue(I.getOperand(1))));
5601 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5602 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5603 MVT::Other, getRoot(),
5604 getValue(I.getOperand(1)),
5605 getValue(I.getOperand(2)),
5606 DAG.getSrcValue(I.getOperand(1)),
5607 DAG.getSrcValue(I.getOperand(2))));
5610 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5611 /// implementation, which just calls LowerCall.
5612 /// FIXME: When all targets are
5613 /// migrated to using LowerCall, this hook should be integrated into SDISel.
5614 std::pair<SDValue, SDValue>
5615 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5616 bool RetSExt, bool RetZExt, bool isVarArg,
5617 bool isInreg, unsigned NumFixedArgs,
5618 CallingConv::ID CallConv, bool isTailCall,
5619 bool isReturnValueUsed,
5620 SDValue Callee,
5621 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5623 assert((!isTailCall || PerformTailCallOpt) &&
5624 "isTailCall set when tail-call optimizations are disabled!");
5626 // Handle all of the outgoing arguments.
5627 SmallVector<ISD::OutputArg, 32> Outs;
5628 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5629 SmallVector<EVT, 4> ValueVTs;
5630 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5631 for (unsigned Value = 0, NumValues = ValueVTs.size();
5632 Value != NumValues; ++Value) {
5633 EVT VT = ValueVTs[Value];
5634 const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5635 SDValue Op = SDValue(Args[i].Node.getNode(),
5636 Args[i].Node.getResNo() + Value);
5637 ISD::ArgFlagsTy Flags;
5638 unsigned OriginalAlignment =
5639 getTargetData()->getABITypeAlignment(ArgTy);
5641 if (Args[i].isZExt)
5642 Flags.setZExt();
5643 if (Args[i].isSExt)
5644 Flags.setSExt();
5645 if (Args[i].isInReg)
5646 Flags.setInReg();
5647 if (Args[i].isSRet)
5648 Flags.setSRet();
5649 if (Args[i].isByVal) {
5650 Flags.setByVal();
5651 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5652 const Type *ElementTy = Ty->getElementType();
5653 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5654 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5655 // For ByVal, alignment should come from the FE; the BE will guess if this
5656 // info is not there, but there are cases it cannot get right.
5657 if (Args[i].Alignment)
5658 FrameAlign = Args[i].Alignment;
5659 Flags.setByValAlign(FrameAlign);
5660 Flags.setByValSize(FrameSize);
5661 }
5662 if (Args[i].isNest)
5663 Flags.setNest();
5664 Flags.setOrigAlign(OriginalAlignment);
5666 EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5667 unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5668 SmallVector<SDValue, 4> Parts(NumParts);
5669 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5671 if (Args[i].isSExt)
5672 ExtendKind = ISD::SIGN_EXTEND;
5673 else if (Args[i].isZExt)
5674 ExtendKind = ISD::ZERO_EXTEND;
5676 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5678 for (unsigned j = 0; j != NumParts; ++j) {
5679 // If it isn't the first piece, the alignment must be 1.
5680 ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5681 if (NumParts > 1 && j == 0)
5682 MyFlags.Flags.setSplit();
5683 else if (j != 0)
5684 MyFlags.Flags.setOrigAlign(1);
5686 Outs.push_back(MyFlags);
5687 }
5688 }
5689 }
5691 // Handle the incoming return values from the call.
5692 SmallVector<ISD::InputArg, 32> Ins;
5693 SmallVector<EVT, 4> RetTys;
5694 ComputeValueVTs(*this, RetTy, RetTys);
5695 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5696 EVT VT = RetTys[I];
5697 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5698 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5699 for (unsigned i = 0; i != NumRegs; ++i) {
5700 ISD::InputArg MyFlags;
5701 MyFlags.VT = RegisterVT;
5702 MyFlags.Used = isReturnValueUsed;
5703 if (RetSExt)
5704 MyFlags.Flags.setSExt();
5705 if (RetZExt)
5706 MyFlags.Flags.setZExt();
5707 if (isInreg)
5708 MyFlags.Flags.setInReg();
5709 Ins.push_back(MyFlags);
5710 }
5711 }
5713 // Check if target-dependent constraints permit a tail call here.
5714 // Target-independent constraints should be checked by the caller.
5715 if (isTailCall &&
5716 !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
5717 isTailCall = false;
5719 SmallVector<SDValue, 4> InVals;
5720 Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
5721 Outs, Ins, dl, DAG, InVals);
5723 // Verify that the target's LowerCall behaved as expected.
5724 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5725 "LowerCall didn't return a valid chain!");
5726 assert((!isTailCall || InVals.empty()) &&
5727 "LowerCall emitted a return value for a tail call!");
5728 assert((isTailCall || InVals.size() == Ins.size()) &&
5729 "LowerCall didn't emit the correct number of values!");
5730 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5731 assert(InVals[i].getNode() &&
5732 "LowerCall emitted a null value!");
5733 assert(Ins[i].VT == InVals[i].getValueType() &&
5734 "LowerCall emitted a value with the wrong type!");
5735 });
5737 // For a tail call, the return value is merely live-out and there aren't
5738 // any nodes in the DAG representing it. Return a special value to
5739 // indicate that a tail call has been emitted and no more Instructions
5740 // should be processed in the current block.
5741 if (isTailCall) {
5742 DAG.setRoot(Chain);
5743 return std::make_pair(SDValue(), SDValue());
5744 }
5746 // Collect the legal value parts into potentially illegal values
5747 // that correspond to the original function's return values.
5748 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5749 if (RetSExt)
5750 AssertOp = ISD::AssertSext;
5751 else if (RetZExt)
5752 AssertOp = ISD::AssertZext;
5753 SmallVector<SDValue, 4> ReturnValues;
5754 unsigned CurReg = 0;
5755 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5756 EVT VT = RetTys[I];
5757 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5758 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5760 SDValue ReturnValue =
5761 getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
5762 AssertOp);
5763 ReturnValues.push_back(ReturnValue);
5764 CurReg += NumRegs;
5765 }
5767 // For a function returning void, there is no return value. We can't create
5768 // such a node, so we just return a null return value in that case. Nothing
5769 // will actually look at the value.
5770 if (ReturnValues.empty())
5771 return std::make_pair(SDValue(), Chain);
5773 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5774 DAG.getVTList(&RetTys[0], RetTys.size()),
5775 &ReturnValues[0], ReturnValues.size());
5777 return std::make_pair(Res, Chain);
5778 }
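// Worked example of the argument splitting above, assuming a 32-bit target
// where i64 is illegal: a single i64 argument produces NumParts = 2 with
// PartVT = MVT::i32; the first ISD::OutputArg carries the Split flag and
// the second has OrigAlign forced to 1, so the target's LowerCall can tell
// that both pieces come from one original argument.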
5780 void TargetLowering::LowerOperationWrapper(SDNode *N,
5781 SmallVectorImpl<SDValue> &Results,
5782 SelectionDAG &DAG) {
5783 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5784 if (Res.getNode())
5785 Results.push_back(Res);
5786 }
5788 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5789 llvm_unreachable("LowerOperation not implemented for this target!");
5790 return SDValue();
5791 }
5794 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5795 SDValue Op = getValue(V);
5796 assert((Op.getOpcode() != ISD::CopyFromReg ||
5797 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5798 "Copy from a reg to the same reg!");
5799 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5801 RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5802 SDValue Chain = DAG.getEntryNode();
5803 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5804 PendingExports.push_back(Chain);
5805 }
5807 #include "llvm/CodeGen/SelectionDAGISel.h"
5809 void SelectionDAGISel::
5810 LowerArguments(BasicBlock *LLVMBB) {
5811 // If this is the entry block, emit arguments.
5812 Function &F = *LLVMBB->getParent();
5813 SelectionDAG &DAG = SDL->DAG;
5814 SDValue OldRoot = DAG.getRoot();
5815 DebugLoc dl = SDL->getCurDebugLoc();
5816 const TargetData *TD = TLI.getTargetData();
5818 // Set up the incoming argument description vector.
5819 SmallVector<ISD::InputArg, 16> Ins;
5820 unsigned Idx = 1;
5821 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5822 I != E; ++I, ++Idx) {
5823 SmallVector<EVT, 4> ValueVTs;
5824 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5825 bool isArgValueUsed = !I->use_empty();
5826 for (unsigned Value = 0, NumValues = ValueVTs.size();
5827 Value != NumValues; ++Value) {
5828 EVT VT = ValueVTs[Value];
5829 const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5830 ISD::ArgFlagsTy Flags;
5831 unsigned OriginalAlignment =
5832 TD->getABITypeAlignment(ArgTy);
5834 if (F.paramHasAttr(Idx, Attribute::ZExt))
5835 Flags.setZExt();
5836 if (F.paramHasAttr(Idx, Attribute::SExt))
5837 Flags.setSExt();
5838 if (F.paramHasAttr(Idx, Attribute::InReg))
5839 Flags.setInReg();
5840 if (F.paramHasAttr(Idx, Attribute::StructRet))
5841 Flags.setSRet();
5842 if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5843 Flags.setByVal();
5844 const PointerType *Ty = cast<PointerType>(I->getType());
5845 const Type *ElementTy = Ty->getElementType();
5846 unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5847 unsigned FrameSize = TD->getTypeAllocSize(ElementTy);
5848 // For ByVal, alignment should be passed from the FE; the BE will guess if
5849 // this info is not there, but there are cases it cannot get right.
5850 if (F.getParamAlignment(Idx))
5851 FrameAlign = F.getParamAlignment(Idx);
5852 Flags.setByValAlign(FrameAlign);
5853 Flags.setByValSize(FrameSize);
5854 }
5855 if (F.paramHasAttr(Idx, Attribute::Nest))
5856 Flags.setNest();
5857 Flags.setOrigAlign(OriginalAlignment);
5859 EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5860 unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5861 for (unsigned i = 0; i != NumRegs; ++i) {
5862 ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5863 if (NumRegs > 1 && i == 0)
5864 MyFlags.Flags.setSplit();
5865 // If it isn't the first piece, the alignment must be 1.
5866 else if (i > 0)
5867 MyFlags.Flags.setOrigAlign(1);
5868 Ins.push_back(MyFlags);
5869 }
5870 }
5871 }
5873 // Call the target to set up the argument values.
5874 SmallVector<SDValue, 8> InVals;
5875 SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5876 F.isVarArg(), Ins,
5877 dl, DAG, InVals);
5879 // Verify that the target's LowerFormalArguments behaved as expected.
5880 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5881 "LowerFormalArguments didn't return a valid chain!");
5882 assert(InVals.size() == Ins.size() &&
5883 "LowerFormalArguments didn't emit the correct number of values!");
5884 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5885 assert(InVals[i].getNode() &&
5886 "LowerFormalArguments emitted a null value!");
5887 assert(Ins[i].VT == InVals[i].getValueType() &&
5888 "LowerFormalArguments emitted a value with the wrong type!");
5889 });
5891 // Update the DAG with the new chain value resulting from argument lowering.
5892 DAG.setRoot(NewRoot);
5894 // Set up the argument values.
5895 unsigned i = 0;
5896 Idx = 1;
5897 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5898 ++I, ++Idx) {
5899 SmallVector<SDValue, 4> ArgValues;
5900 SmallVector<EVT, 4> ValueVTs;
5901 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5902 unsigned NumValues = ValueVTs.size();
5903 for (unsigned Value = 0; Value != NumValues; ++Value) {
5904 EVT VT = ValueVTs[Value];
5905 EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5906 unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5908 if (!I->use_empty()) {
5909 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5910 if (F.paramHasAttr(Idx, Attribute::SExt))
5911 AssertOp = ISD::AssertSext;
5912 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5913 AssertOp = ISD::AssertZext;
5915 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
5916 PartVT, VT, AssertOp));
5917 }
5918 i += NumParts;
5919 }
5920 if (!I->use_empty()) {
5921 SDL->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
5922 SDL->getCurDebugLoc()));
5923 // If this argument is live outside of the entry block, insert a copy from
5924 // wherever we got it to the vreg that other BB's will reference it as.
5925 SDL->CopyToExportRegsIfNeeded(I);
5926 }
5927 }
5928 assert(i == InVals.size() && "Argument register count mismatch!");
5930 // Finally, if the target has anything special to do, allow it to do so.
5931 // FIXME: this should insert code into the DAG!
5932 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5933 }
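// For instance (informal): an argument declared "i8 signext %c" reaches
// the copy-out loop above with the SExt attribute set, so AssertOp becomes
// ISD::AssertSext and getCopyFromParts wraps the incoming register value
// in an AssertSext-from-i8 node before it is merged and handed to setValue.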
5935 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5936 /// ensure constants are generated when needed. Remember the virtual registers
5937 /// that need to be added to the Machine PHI nodes as input. We cannot just
5938 /// directly add them, because expansion might result in multiple MBB's for one
5939 /// BB. As such, the start of the BB might correspond to a different MBB than
5940 /// the end.
5942 void
5943 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5944 TerminatorInst *TI = LLVMBB->getTerminator();
5946 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5948 // Check successor nodes' PHI nodes that expect a constant to be available
5949 // from this block.
5950 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5951 BasicBlock *SuccBB = TI->getSuccessor(succ);
5952 if (!isa<PHINode>(SuccBB->begin())) continue;
5953 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5955 // If this terminator has multiple identical successors (common for
5956 // switches), only handle each succ once.
5957 if (!SuccsHandled.insert(SuccMBB)) continue;
5959 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5960 PHINode *PN;
5962 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5963 // nodes and Machine PHI nodes, but the incoming operands have not been
5964 // emitted yet.
5965 for (BasicBlock::iterator I = SuccBB->begin();
5966 (PN = dyn_cast<PHINode>(I)); ++I) {
5968 // Ignore dead PHIs.
5968 if (PN->use_empty()) continue;
5970 unsigned Reg;
5971 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5973 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5974 unsigned &RegOut = SDL->ConstantsOut[C];
5975 if (RegOut == 0) {
5976 RegOut = FuncInfo->CreateRegForValue(C);
5977 SDL->CopyValueToVirtualRegister(C, RegOut);
5978 }
5979 Reg = RegOut;
5980 } else {
5981 Reg = FuncInfo->ValueMap[PHIOp];
5982 if (Reg == 0) {
5983 assert(isa<AllocaInst>(PHIOp) &&
5984 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5985 "Didn't codegen value into a register!??");
5986 Reg = FuncInfo->CreateRegForValue(PHIOp);
5987 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
5988 }
5989 }
5991 // Remember that this register needs to be added to the machine PHI node as
5992 // the input for this MBB.
5993 SmallVector<EVT, 4> ValueVTs;
5994 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5995 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5996 EVT VT = ValueVTs[vti];
5997 unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5998 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5999 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
6000 Reg += NumRegisters;
6001 }
6002 }
6003 }
6004 SDL->ConstantsOut.clear();
6005 }
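// Illustration (hypothetical CFG): if this block branches to %succ, which
// begins with "%x = phi i32 [ 7, %this ], [ %y, %other ]", the constant 7
// gets a fresh vreg from CreateRegForValue (cached in ConstantsOut so
// repeated uses share it), a copy into that vreg is emitted in this block,
// and the (machine PHI, vreg) pair is queued in PHINodesToUpdate for the
// later fixup pass.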
6007 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
6008 /// supports legal types, and it emits MachineInstrs directly instead of
6009 /// creating SelectionDAG nodes.
6011 bool
6012 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
6013 FastISel *F) {
6014 TerminatorInst *TI = LLVMBB->getTerminator();
6016 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6017 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
6019 // Check successor nodes' PHI nodes that expect a constant to be available
6020 // from this block.
6021 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6022 BasicBlock *SuccBB = TI->getSuccessor(succ);
6023 if (!isa<PHINode>(SuccBB->begin())) continue;
6024 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6026 // If this terminator has multiple identical successors (common for
6027 // switches), only handle each succ once.
6028 if (!SuccsHandled.insert(SuccMBB)) continue;
6030 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6031 PHINode *PN;
6033 // At this point we know that there is a 1-1 correspondence between LLVM PHI
6034 // nodes and Machine PHI nodes, but the incoming operands have not been
6035 // emitted yet.
6036 for (BasicBlock::iterator I = SuccBB->begin();
6037 (PN = dyn_cast<PHINode>(I)); ++I) {
6038 // Ignore dead phi's.
6039 if (PN->use_empty()) continue;
6041 // Only handle legal types. Two interesting things to note here. First,
6042 // by bailing out early, we may leave behind some dead instructions,
6043 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
6044 // own moves. Second, this check is necessary because FastISel doesn't
6045 // use CreateRegForValue to create registers, so it always creates
6046 // exactly one register for each non-void instruction.
6047 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6048 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6049 // Promote MVT::i1.
6050 if (VT == MVT::i1)
6051 VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
6052 else {
6053 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6054 return false;
6055 }
6056 }
6058 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6060 unsigned Reg = F->getRegForValue(PHIOp);
6061 if (Reg == 0) {
6062 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6063 return false;
6064 }
6065 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
6066 }
6067 }
6069 return true;
6070 }
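// Example of the legality screen above, assuming i1 is not legal for the
// target: a "phi i1" is promoted via getTypeToTransformTo (commonly to
// i8), while a PHI of a type the target cannot hold in one legal register
// truncates PHINodesToUpdate back to its original size and returns false,
// falling back to the SelectionDAG handler above for this block.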