//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support a 'brcc'
/// instruction, this will attempt to merge the setcc and brcc instructions
/// into brcc's.
///
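//
// For a rough illustration of the Legal/Promote/Expand actions used below
// (assuming a hypothetical target with only 32-bit integer registers): an
// i32 ADD would be Legal and left alone, an i16 ADD would typically be
// Promoted to an i32 ADD, and an i64 ADD would be Expanded into a pair of
// 32-bit operations or a libcall.
//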
namespace {
class VISIBILITY_HIDDEN SelectionDAGLegalize {
  TargetLowering &TLI;
  SelectionDAG &DAG;
  CodeGenOpt::Level OptLevel;

  // Libcall insertion helpers.

  /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
  /// legalized.  We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SDValue LastCALLSEQ_END;

  /// IsLegalizingCall - This member is used *only* for purposes of providing
  /// helpful assertions that a libcall isn't created while another call is
  /// being legalized (which could lead to non-serialized call sequences).
  bool IsLegalizingCall;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use.  This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }
public:
  SelectionDAGLegalize(SelectionDAG &DAG, CodeGenOpt::Level ol);

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal or we need to expand it into multiple registers of
  /// smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return
      (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  ///
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order or result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.  e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned, RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32, RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  SDValue ExpandDBG_STOPPOINT(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
}  // end anonymous namespace
/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  EVT EltVT = NVT.getVectorElementType();
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}
SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
                                           CodeGenOpt::Level ol)
  : TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol),
    ValueTypeActions(TLI.getValueTypeActions()) {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");
}
void SelectionDAGLegalize::LegalizeDAG() {
  LastCALLSEQ_END = DAG.getEntryNode();
  IsLegalizingCall = false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves).  Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph.  In
  // practice however, this causes us to run out of stack space on large basic
  // blocks.  To avoid this problem, compute an ordering of the nodes where
  // each node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = prior(DAG.allnodes_end()); I != next(E); ++I)
    LegalizeOp(SDValue(I, 0));

  // Finally, it's possible the root changed.  Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();
}
/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
static SDNode *FindCallEndFromCallStart(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_END)
    return Node;
  if (Node->use_empty())
    return 0;   // No CallSeqEnd

  // The chain is usually at the end.
  SDValue TheChain(Node, Node->getNumValues()-1);
  if (TheChain.getValueType() != MVT::Other) {
    // Sometimes it's at the beginning.
    TheChain = SDValue(Node, 0);
    if (TheChain.getValueType() != MVT::Other) {
      // Otherwise, hunt for it.
      for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
        if (Node->getValueType(i) == MVT::Other) {
          TheChain = SDValue(Node, i);
          break;
        }

      // Otherwise, we walked into a node without a chain.
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {

    // Make sure to only follow users of our token chain.
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User))
          return Result;
  }
  return 0;
}
/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  assert(Node && "Didn't find callseq_start for a call??");
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;

  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCallEnd(Node->getOperand(0).getNode());
}
/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
/// see if any uses can reach Dest.  If none of the operands can reach Dest,
/// legalize them, legalize ourself, and return false; otherwise, return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo.  This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has already been legalized, then it
  // cannot reach Dest.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized.  Check and legalize all
  // operands.  If none lead to Dest, then we can legalize this node.
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=     // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}
/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as
  // on the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // the smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend)
    return DAG.getExtLoad(ISD::EXTLOAD, dl,
                          OrigVT, DAG.getEntryNode(),
                          CPIdx, PseudoSourceValue::getConstantPool(),
                          0, VT, false, Alignment);
  return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                     PseudoSourceValue::getConstantPool(), 0, false, Alignment);
}
/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                                    const TargetLowering &TLI) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  int SVOffset = ST->getSrcValueOffset();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val);
      return DAG.getStore(Chain, dl, Result, Ptr, ST->getSrcValue(),
                          SVOffset, ST->isVolatile(), Alignment);
    }

    // Do an (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    EVT RegVT =
      TLI.getRegisterType(*DAG.getContext(),
                          EVT::getIntegerVT(*DAG.getContext(),
                                            StoredVT.getSizeInBits()));
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl,
                                      Val, StackPtr, NULL, 0, StoredVT);
    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, NULL, 0);
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getSrcValue(), SVOffset + Offset,
                                    ST->isVolatile(),
                                    MinAlign(ST->getAlignment(), Offset)));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  NULL, 0, MemVT);

    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                       ST->getSrcValue(), SVOffset + Offset,
                                       MemVT, ST->isVolatile(),
                                       MinAlign(ST->getAlignment(), Offset)));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                       Stores.size());
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT.
  EVT NewStoredVT =
    (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1);
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts.
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian() ? Lo : Hi, Ptr,
                             ST->getSrcValue(), SVOffset, NewStoredVT,
                             ST->isVolatile(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian() ? Hi : Lo, Ptr,
                             ST->getSrcValue(), SVOffset + IncrementSize,
                             NewStoredVT, ST->isVolatile(), Alignment);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}
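// Rough sketch of the integer half-size path above, assuming a little-endian
// target whose half-width stores are sufficiently aligned: an unaligned
// 'store i32 %v, %p' becomes
//   truncstore i16 %v         -> %p        ; low half  (Lo)
//   truncstore i16 (%v >> 16) -> %p + 2    ; high half (Hi)
// with the two stores joined by a TokenFactor, mirroring Store1/Store2 above.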
/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                                   const TargetLowering &TLI) {
  int SVOffset = LD->getSrcValueOffset();
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getSrcValue(),
                                    SVOffset, LD->isVolatile(),
                                    LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      SDValue Ops[] = { Result, Chain };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // Copy the value to an (aligned) stack slot using (unaligned) integer
    // loads and stores, then do an (aligned) load from the stack slot.
    EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, LD->getSrcValue(),
                                 SVOffset + Offset, LD->isVolatile(),
                                 MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    NULL, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getSrcValue(), SVOffset + Offset,
                                  MemVT, LD->isVolatile(),
                                  MinAlign(LD->getAlignment(), Offset));
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       NULL, 0, MemVT));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                             Stores.size());

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          NULL, 0, LoadedVT);

    // Callers expect a MERGE_VALUES node.
    SDValue Ops[] = { Load, TF };
    return DAG.getMergeValues(Ops, 2, dl);
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts.
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}
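// Rough sketch of the integer half-size path above, assuming a little-endian
// target: an unaligned 'load i32, %p' becomes
//   %lo = zextload i16 from %p          ; low half  (Lo)
//   %hi = extload  i16 from %p + 2      ; high half (Hi, ext kind HiExtType)
//   result = (%hi << 16) | %lo
// with the two load chains merged through a TokenFactor.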
/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction), then
  // permute it into place, if the idx is a constant and is supported by the
  // target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            PseudoSourceValue::getFixedStack(SPFI), 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
                     DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2,
                         PseudoSourceValue::getFixedStack(SPFI), 0, EltVT);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     PseudoSourceValue::getFixedStack(SPFI), 0);
}
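// In short, the sequence built above is: spill the vector to a stack
// temporary, compute the element address as temp + Idx * sizeof(element),
// truncstore the scalar to that address, then reload the whole vector.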
SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // element 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}
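// For example, inserting into element 2 of a 4-element vector with a constant
// index builds ScVec = SCALAR_TO_VECTOR(Val) and shuffles Vec with ScVec using
// the mask <0, 1, 4, 3>: element 2 comes from element 0 of ScVec (index
// NumElts), the rest come from the original vector.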
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner!  Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner.  This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  int SVOffset = ST->getSrcValueOffset();
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        getTypeAction(MVT::i32) == Legal) {
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                                 bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                          SVOffset, isVolatile, Alignment);
    } else if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (getTypeAction(MVT::i64) == Legal) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                   zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                            SVOffset, isVolatile, Alignment);
      } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores.  If the target supports neither 32- nor 64-bit registers,
        // this xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(),
                          SVOffset, isVolatile, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4,
                          isVolatile, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue();
}
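// Concretely, 'store float 1.0, Ptr' becomes 'store i32 0x3F800000, Ptr', and
// 'store double 1.0, Ptr' becomes either one i64 store of 0x3FF0000000000000
// or two i32 stores, depending on which integer type is legal on the target.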
/// LegalizeOp - We know that the specified value has a legal type, and
/// that its operands are legal.  Now ensure that the operation itself
/// is legal, recursively ensuring that the operands' operations remain
/// legal.
SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
  if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return Op;

  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(getTypeAction(Node->getValueType(i)) == Legal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  SDValue Result = Op;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode.
  TargetLowering::LegalizeAction Action;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly.  LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary.  These nodes have special properties
    // dealing with the recursive nature of legalization.  Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::MERGE_VALUES:
  case ISD::FRAME_TO_ARGS_OFFSET:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::TRAMPOLINE:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::BUILD_VECTOR:
    // A weird case: legalization for BUILD_VECTOR never legalizes the
    // operands!
    // FIXME: This really sucks... changing it isn't semantically incorrect,
    // but it massively pessimizes the code for floating-point BUILD_VECTORs
    // because ConstantFP operands get legalized into constant pool loads
    // before the BUILD_VECTOR code can see them.  It doesn't usually bite,
    // though, because BUILD_VECTORS usually get lowered into other nodes
    // which get legalized properly.
    SimpleFinishLegalizing = false;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(),
                                      Node->getValueType(0));
    }
    break;
  }
  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops, ResultVals;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(LegalizeOp(Node->getOperand(i)));
    switch (Node->getOpcode()) {
    case ISD::BR:
    case ISD::BRIND:
    case ISD::BR_JT:
    case ISD::BR_CC:
    case ISD::BRCOND:
      // Branches tweak the chain to include LastCALLSEQ_END.
      Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
                           LastCALLSEQ_END);
      Ops[0] = LegalizeOp(Ops[0]);
      LastCALLSEQ_END = DAG.getEntryNode();
      break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector())
        Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[1]));
      break;
    }

    Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
                                    Ops.size());
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a mess.
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }

    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }
  switch (Node->getOpcode()) {
  default:
    cerr << "NODE: "; Node->dump(&DAG); cerr << "\n";
    llvm_unreachable("Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALL THROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);

    // Recursively Legalize all of the inputs of the call end that do not lead
    // to this call start.  This ensures that any libcalls that need to be
    // inserted are inserted *before* the CALLSEQ_START.
    {
      SmallPtrSet<SDNode*, 32> NodesLeadingTo;
      for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
        LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                     NodesLeadingTo);
    }

    // Now that we legalized all of the inputs (which may have inserted
    // libcalls) create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call to ensure that this call starts after the last
    // call ended.
    if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, LastCALLSEQ_END);
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)    // If this has a flag result, remember it.
      AddLegalizedOperand(Op.getValue(1), Result.getValue(1));

    // Now that the callseq_start and all of the non-call nodes above this call
    // sequence have been legalized, legalize the call itself.  During this
    // process, no libcalls can/will be inserted, guaranteeing that no calls
    // can overlap.
    assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
    // Note that we are selecting this call!
    LastCALLSEQ_END = SDValue(CallEnd, 0);
    IsLegalizingCall = true;

    // Legalize the call, starting from the CALLSEQ_END.
    LegalizeOp(LastCALLSEQ_END);
    assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!");
    return Result.getValue(Op.getResNo());
  }
  case ISD::CALLSEQ_END:
    // If the CALLSEQ_START node hasn't been legalized first, legalize it.  This
    // will cause this node to be legalized as well as handling libcalls right.
    if (LastCALLSEQ_END.getNode() != Node) {
      LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0));
      DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
      assert(I != LegalizedNodes.end() &&
             "Legalizing the call start should have legalized this node!");
      return I->second;
    }

    // Otherwise, the call start has been legalized and everything is going
    // according to plan.  Just legalize ourselves normally here.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
    // Do not try to legalize the target-specific arguments (#1+), except for
    // an optional flag input.
    if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){
      if (Tmp1 != Node->getOperand(0)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
      }
    } else {
      Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
      if (Tmp1 != Node->getOperand(0) ||
          Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Ops.back() = Tmp2;
        Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
      }
    }
    assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
    // This finishes up call legalization.
    IsLegalizingCall = false;

    // If the CALLSEQ_END node has a flag, remember that we legalized it.
    AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
    if (Node->getNumValues() == 2)
      AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
    return Result.getValue(Op.getResNo());
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LegalizeOp(LD->getChain());   // Legalize the chain.
    Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
      Tmp3 = Result.getValue(0);
      Tmp4 = Result.getValue(1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses()) {
          const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment){
            Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                         DAG, TLI);
            Tmp3 = Result.getOperand(0);
            Tmp4 = Result.getOperand(1);
            Tmp3 = LegalizeOp(Tmp3);
            Tmp4 = LegalizeOp(Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = LegalizeOp(Tmp1);
          Tmp4 = LegalizeOp(Tmp1.getValue(1));
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);

        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
                           LD->getSrcValueOffset(),
                           LD->isVolatile(), LD->getAlignment());
        Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1));
        Tmp4 = LegalizeOp(Tmp1.getValue(1));
        break;
      }
      }
      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      AddLegalizedOperand(SDValue(Node, 0), Tmp3);
      AddLegalizedOperand(SDValue(Node, 1), Tmp4);
      return Op.getResNo() ? Tmp4 : Tmp3;
    }

    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    int SVOffset = LD->getSrcValueOffset();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8.  This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero.  It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined.  It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way.  A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                              Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
                              NVT, isVolatile, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(SrcVT.isExtended() && !SrcVT.isVector() &&
             "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
                            Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
                            Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset + IncrementSize,
                            ExtraVT, isVolatile,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of the
        // other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
                            Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
                            Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset + IncrementSize,
                            ExtraVT, isVolatile,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of the
        // other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
        Tmp1 = Result.getValue(0);
        Tmp2 = Result.getValue(1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(Result, DAG);
          if (Tmp3.getNode()) {
            Tmp1 = LegalizeOp(Tmp3);
            Tmp2 = LegalizeOp(Tmp3.getValue(1));
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses()) {
            const Type *Ty =
              LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment){
              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                           DAG, TLI);
              Tmp1 = Result.getOperand(0);
              Tmp2 = Result.getOperand(1);
              Tmp1 = LegalizeOp(Tmp1);
              Tmp2 = LegalizeOp(Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
        if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
                                     LD->getSrcValueOffset(),
                                     LD->isVolatile(), LD->getAlignment());
          Result = DAG.getNode(ISD::FP_EXTEND, dl,
                               Node->getValueType(0), Load);
          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Load.getValue(1));
          break;
        }
        assert(ExtType != ISD::EXTLOAD &&
               "EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
        Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                Tmp1, Tmp2, LD->getSrcValue(),
                                LD->getSrcValueOffset(), SrcVT,
                                LD->isVolatile(), LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT);
        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    AddLegalizedOperand(SDValue(Node, 0), Tmp1);
    AddLegalizedOperand(SDValue(Node, 1), Tmp2);
    return Op.getResNo() ? Tmp2 : Tmp1;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = LegalizeOp(ST->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(ST->getBasePtr());  // Legalize the pointer.
    int SVOffset = ST->getSrcValueOffset();
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        Result = SDValue(OptStore, 0);
        break;
      }

      Tmp3 = LegalizeOp(ST->getValue());
      Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
                                      ST->getOffset());

      EVT VT = Tmp3.getValueType();
      switch (TLI.getOperationAction(ISD::STORE, VT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned store and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses()) {
          const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (ST->getAlignment() < ABIAlignment)
            Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                          DAG, TLI);
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Result, DAG);
        if (Tmp1.getNode()) Result = Tmp1;
        break;
      case TargetLowering::Promote:
        assert(VT.isVector() && "Unknown legal promote case!");
        Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl,
                           TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
        Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                              ST->getSrcValue(), SVOffset, isVolatile,
                              Alignment);
        break;
      }
    } else {
      Tmp3 = LegalizeOp(ST->getValue());

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes.  For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                   SVOffset, NVT, isVolatile, Alignment);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
        assert(StVT.isExtended() && !StVT.isVector() &&
               "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                 SVOffset, RoundVT,
                                 isVolatile, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
                                 SVOffset + IncrementSize, ExtraVT, isVolatile,
                                 MinAlign(Alignment, IncrementSize));
        } else {
          // Big endian - avoid unaligned stores.
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
          // Store the top RoundWidth bits.
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
                                 SVOffset, RoundVT, isVolatile, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                 SVOffset + IncrementSize, ExtraVT, isVolatile,
                                 MinAlign(Alignment, IncrementSize));
        }

        // The order of the stores doesn't matter.
        Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      } else {
        if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
            Tmp2 != ST->getBasePtr())
          Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
                                          ST->getOffset());

        switch (TLI.getTruncStoreAction(ST->getValue().getValueType(), StVT)) {
        default: llvm_unreachable("This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses()) {
            const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                            DAG, TLI);
          }
          break;
        case TargetLowering::Custom:
          Result = TLI.LowerOperation(Result, DAG);
          break;
        case TargetLowering::Expand:
          // TRUNCSTORE:i16 i32 -> STORE i16
          assert(isTypeLegal(StVT) && "Do not know how to expand this store!");
          Tmp3 = DAG.getNode(ISD::TRUNCATE, dl, StVT, Tmp3);
          Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                SVOffset, isVolatile, Alignment);
          break;
        }
      }
    }
    break;
  }
  }
  assert(Result.getValueType() == Op.getValueType() &&
         "Bad legalization!");

  // Make sure that the generated code is itself legal.
  if (Result != Op)
    Result = LegalizeOp(Result);

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  AddLegalizedOperand(Op, Result);
  return Result;
}
SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  // Store the value to a temporary stack slot, then LOAD the returned part.
  SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0);

  // Add the offset to the index.
  unsigned EltSize =
    Vec.getValueType().getVectorElementType().getSizeInBits()/8;
  Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                    DAG.getConstant(EltSize, Idx.getValueType()));

  if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
    Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
  else
    Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);

  StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);

  if (Op.getValueType().isVector())
    return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0);

  return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
                        NULL, 0, Vec.getValueType().getVectorElementType());
}
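// Illustrative note, not part of the original source: the index is scaled to
// a byte offset before being added to the spill-slot address.  For a
// <4 x i32> vector with Idx = 2, EltSize is 32/8 = 4, so the element is
// loaded from StackPtr + 8.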
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
  // We can't handle this case efficiently.  Allocate a sufficiently
  // aligned object on the stack, store each element into it, then load
  // the result as a vector.
  // Create the stack frame object.
  EVT VT = Node->getValueType(0);
  EVT OpVT = Node->getOperand(0).getValueType();
  DebugLoc dl = Node->getDebugLoc();
  SDValue FIPtr = DAG.CreateStackTemporary(VT);
  int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
  const Value *SV = PseudoSourceValue::getFixedStack(FI);

  // Emit a store of each element to the stack slot.
  SmallVector<SDValue, 8> Stores;
  unsigned TypeByteSize = OpVT.getSizeInBits() / 8;
  // Store (in the right endianness) the elements to memory.
  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
    // Ignore undef elements.
    if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    unsigned Offset = TypeByteSize*i;

    SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);

    Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, Node->getOperand(i),
                                  Idx, SV, Offset));
  }

  SDValue StoreChain;
  if (!Stores.empty())    // Not all undef elements?
    StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             &Stores[0], Stores.size());
  else
    StoreChain = DAG.getEntryNode();

  // Result is a load from the stack slot.
  return DAG.getLoad(VT, dl, StoreChain, FIPtr, SV, 0);
}
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  assert((Tmp2.getValueType() == MVT::f32 ||
          Tmp2.getValueType() == MVT::f64) &&
         "Ugly special-cased code!");
  // Get the sign bit of the RHS.
  SDValue SignBit;
  EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
  if (isTypeLegal(IVT)) {
    SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
  } else {
    assert(isTypeLegal(TLI.getPointerTy()) &&
           (TLI.getPointerTy() == MVT::i32 ||
            TLI.getPointerTy() == MVT::i64) &&
           "Legal type for load?!");
    SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType());
    SDValue StorePtr = StackPtr, LoadPtr = StackPtr;
    SDValue Ch =
      DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0);
    if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian())
      LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(),
                            LoadPtr, DAG.getIntPtrConstant(4));
    SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(),
                             Ch, LoadPtr, NULL, 0, MVT::i32);
  }
  // The sign is set iff the integer image of the value is negative.
  SignBit =
    DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
                 SignBit, DAG.getConstant(0, SignBit.getValueType()),
                 ISD::SETLT);
  // Get the absolute value of the result.
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
  // Select between the nabs and abs value based on the sign bit of
  // the input.
  return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit,
                     DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
                     AbsVal);
}
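// Illustrative example for ExpandFCOPYSIGN above (not part of the original
// source): for copysign(1.5, -2.0) the integer image of -2.0 has its sign bit
// set, so the SETLT test is true and the SELECT yields FNEG(FABS(1.5)) = -1.5;
// with a non-negative second operand it yields FABS(1.5) = 1.5.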
SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
  DebugLoc dl = Node->getDebugLoc();
  DwarfWriter *DW = DAG.getDwarfWriter();
  bool useDEBUG_LOC = TLI.isOperationLegalOrCustom(ISD::DEBUG_LOC,
                                                   MVT::Other);
  bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, MVT::Other);

  const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node);
  GlobalVariable *CU_GV = cast<GlobalVariable>(DSP->getCompileUnit());
  if (DW && (useDEBUG_LOC || useLABEL) && !CU_GV->isDeclaration()) {
    DICompileUnit CU(cast<GlobalVariable>(DSP->getCompileUnit()));

    unsigned Line = DSP->getLine();
    unsigned Col = DSP->getColumn();

    if (OptLevel == CodeGenOpt::None) {
      // A bit self-referential to have DebugLoc on Debug_Loc nodes, but it
      // won't hurt anything.
      if (useDEBUG_LOC)
        return DAG.getNode(ISD::DEBUG_LOC, dl, MVT::Other, Node->getOperand(0),
                           DAG.getConstant(Line, MVT::i32),
                           DAG.getConstant(Col, MVT::i32),
                           DAG.getSrcValue(CU.getGV()));

      unsigned ID = DW->RecordSourceLine(Line, Col, CU);
      return DAG.getLabel(ISD::DBG_LABEL, dl, Node->getOperand(0), ID);
    }
  }
  return Node->getOperand(0);
}
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
                                                    SmallVectorImpl<SDValue> &Results) {
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
  assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
         " not tell us which reg is the stack pointer!");
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = SDValue(Node, 0);
  SDValue Tmp2 = SDValue(Node, 1);
  SDValue Tmp3 = Node->getOperand(2);
  SDValue Chain = Tmp1.getOperand(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  SDValue Size = Tmp2.getOperand(1);
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  Chain = SP.getValue(1);
  unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align > StackAlign)
    SP = DAG.getNode(ISD::AND, dl, VT, SP,
                     DAG.getConstant(-(uint64_t)Align, VT));
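  // Illustrative example (not part of the original source): for Align == 32,
  // -(uint64_t)Align is 0xFFFF...FFE0, so the AND above clears the low five
  // bits of SP, rounding the stack pointer down to a 32-byte boundary before
  // the allocation size is subtracted from it.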
  Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size);       // Value
  Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1);     // Output chain

  Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                            DAG.getIntPtrConstant(0, true), SDValue());

  Results.push_back(Tmp1);
  Results.push_back(Tmp2);
}
/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
/// condition code CC on the current target. This routine assumes LHS and RHS
/// have already been legalized by LegalizeSetCCOperands. It expands SETCC with
/// illegal condition code into AND / OR of multiple SETCC values.
void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
                                                 SDValue &LHS, SDValue &RHS,
                                                 SDValue &CC,
                                                 DebugLoc dl) {
  EVT OpVT = LHS.getValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default: llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default: llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    // FIXME: Implement more expansions.
    }

    SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
    SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
    LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    RHS = SDValue();
    CC  = SDValue();
    break;
  }
  }
}
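// Illustrative example (not part of the original source): with no native
// SETONE on floating point, the table above rewrites
//   setcc a, b, setone  ->  and (setcc a, b, setne), (setcc a, b, seto)
// which is true only when neither operand is NaN and a != b.  Likewise
//   setcc a, b, setueq  ->  or (setcc a, b, seteq), (setcc a, b, setuo)
// is true when a == b or when either operand is NaN.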
/// EmitStackConvert - Emit a store/load combination to the stack.  This stores
/// SrcOp to a stack slot of type SlotVT, truncating it if needed.  It then does
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
                                               EVT SlotVT,
                                               EVT DestVT,
                                               DebugLoc dl) {
  // Create the stack frame object.
  unsigned SrcAlign =
    TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
                                              getTypeForEVT(*DAG.getContext()));
  SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);

  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
  int SPFI = StackPtrFI->getIndex();
  const Value *SV = PseudoSourceValue::getFixedStack(SPFI);

  unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
  unsigned SlotSize = SlotVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();
  unsigned DestAlign =
    TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext()));

  // Emit a store to the stack slot.  Use a truncstore if the input value is
  // larger than the slot size.
  SDValue Store;
  if (SrcSize > SlotSize)
    Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
                              SV, 0, SlotVT, false, SrcAlign);
  else {
    assert(SrcSize == SlotSize && "Invalid store");
    Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
                         SV, 0, false, SrcAlign);
  }

  // Result is a load from the stack slot.
  if (SlotSize == DestSize)
    return DAG.getLoad(DestVT, dl, Store, FIPtr, SV, 0, false, DestAlign);

  assert(SlotSize < DestSize && "Unknown extension!");
  return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT,
                        false, DestAlign);
}
SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
  DebugLoc dl = Node->getDebugLoc();
  // Create a vector sized/aligned stack slot, store the value to element #0,
  // then load the whole vector back out.
  SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));

  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
  int SPFI = StackPtrFI->getIndex();

  SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
                                 StackPtr,
                                 PseudoSourceValue::getFixedStack(SPFI), 0,
                                 Node->getValueType(0).getVectorElementType());
  return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
                     PseudoSourceValue::getFixedStack(SPFI), 0);
}
/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
/// support the operation, but do support the resultant vector type.
SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
  unsigned NumElems = Node->getNumOperands();
  SDValue Value1, Value2;
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  EVT OpVT = Node->getOperand(0).getValueType();
  EVT EltVT = VT.getVectorElementType();

  // If the only non-undef value is the low element, turn this into a
  // SCALAR_TO_VECTOR node.  If this is { X, X, X, X }, determine X.
  bool isOnlyLowElement = true;
  bool MoreThanTwoValues = false;
  bool isConstant = true;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue V = Node->getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    if (!Value1.getNode()) {
      Value1 = V;
    } else if (!Value2.getNode()) {
      if (V != Value1)
        Value2 = V;
    } else if (V != Value1 && V != Value2) {
      MoreThanTwoValues = true;
    }
  }

  if (!Value1.getNode())
    return DAG.getUNDEF(VT);

  if (isOnlyLowElement)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));

  // If all elements are constants, create a load from the constant pool.
  if (isConstant) {
    std::vector<Constant*> CV;
    for (unsigned i = 0, e = NumElems; i != e; ++i) {
      if (ConstantFPSDNode *V =
          dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
        CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
      } else if (ConstantSDNode *V =
                 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
        CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
      } else {
        assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
        const Type *OpNTy = OpVT.getTypeForEVT(*DAG.getContext());
        CV.push_back(UndefValue::get(OpNTy));
      }
    }
    Constant *CP = ConstantVector::get(CV);
    SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
    unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
    return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                       PseudoSourceValue::getConstantPool(), 0,
                       false, Alignment);
  }

  if (!MoreThanTwoValues) {
    SmallVector<int, 8> ShuffleVec(NumElems, -1);
    for (unsigned i = 0; i < NumElems; ++i) {
      SDValue V = Node->getOperand(i);
      if (V.getOpcode() == ISD::UNDEF)
        continue;
      ShuffleVec[i] = V == Value1 ? 0 : NumElems;
    }
    if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
      // Get the splatted value into the low element of a vector register.
      SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
      SDValue Vec2;
      if (Value2.getNode())
        Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
      else
        Vec2 = DAG.getUNDEF(VT);

      // Return shuffle(LowValVec, undef, <0,0,0,0>)
      return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
    }
  }

  // Otherwise, we can't handle this case efficiently.
  return ExpandVectorBuildThroughStack(Node);
}
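// Illustrative example (not part of the original source): for
// BUILD_VECTOR <4 x i32> x, undef, x, x the scan above finds Value1 = x and no
// Value2, producing ShuffleVec = <0, -1, 0, 0>.  Vec1 places x in lane 0 via
// SCALAR_TO_VECTOR, Vec2 is undef, and the shuffle broadcasts lane 0, i.e. a
// splat of x.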
// ExpandLibCall - Expand a node into a call to a libcall.  If the result value
// does not fit into a register, return the lo part and set the hi part to the
// by-reg argument.  If it does fit into a single register, return the result
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
                                            bool isSigned) {
  assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Node->getOperand(i).getValueType();
    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy());

  // Splice the libcall in wherever FindInputOutputChains tells us to.
  const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG,
                    Node->getDebugLoc());

  // Legalize the call sequence, starting with the chain.  This will advance
  // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
  // was added by LowerCallTo (guaranteeing proper serialization of calls).
  LegalizeOp(CallInfo.second);
  return CallInfo.first;
}

SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
                                              RTLIB::Libcall Call_F32,
                                              RTLIB::Libcall Call_F64,
                                              RTLIB::Libcall Call_F80,
                                              RTLIB::Libcall Call_PPCF128) {
  RTLIB::Libcall LC;
  switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32: LC = Call_F32; break;
  case MVT::f64: LC = Call_F64; break;
  case MVT::f80: LC = Call_F80; break;
  case MVT::ppcf128: LC = Call_PPCF128; break;
  }
  return ExpandLibCall(LC, Node, false);
}

SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
                                               RTLIB::Libcall Call_I16,
                                               RTLIB::Libcall Call_I32,
                                               RTLIB::Libcall Call_I64,
                                               RTLIB::Libcall Call_I128) {
  RTLIB::Libcall LC;
  switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i16: LC = Call_I16; break;
  case MVT::i32: LC = Call_I32; break;
  case MVT::i64: LC = Call_I64; break;
  case MVT::i128: LC = Call_I128; break;
  }
  return ExpandLibCall(LC, Node, isSigned);
}
/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a
/// INT_TO_FP operation of the specified operand when the target requests that
/// we expand it.  At this point, we know that the result and operand types are
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
                                                   SDValue Op0,
                                                   EVT DestVT,
                                                   DebugLoc dl) {
  if (Op0.getValueType() == MVT::i32) {
    // simple 32-bit [signed|unsigned] integer to float/double expansion

    // Get the stack frame index of a 8 byte buffer.
    SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);

    // word offset constant for Hi/Lo address computation
    SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy());
    // set up Hi and Lo (into buffer) address based on endian
    SDValue Hi = StackSlot;
    SDValue Lo = DAG.getNode(ISD::ADD, dl,
                             TLI.getPointerTy(), StackSlot, WordOff);
    if (TLI.isLittleEndian())
      std::swap(Hi, Lo);

    // if signed map to unsigned space
    SDValue Op0Mapped;
    if (isSigned) {
      // constant used to invert sign bit (signed to unsigned mapping)
      SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32);
      Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
    } else {
      Op0Mapped = Op0;
    }
    // store the lo of the constructed double - based on integer input
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
                                  Op0Mapped, Lo, NULL, 0);
    // initial hi portion of constructed double
    SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
    // store the hi of the constructed double - biased exponent
    SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0);
    // load the constructed double
    SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0);
    // FP constant to bias correct the final result
    SDValue Bias = DAG.getConstantFP(isSigned ?
                                     BitsToDouble(0x4330000080000000ULL) :
                                     BitsToDouble(0x4330000000000000ULL),
                                     MVT::f64);
    // subtract the bias
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
    // final result
    SDValue Result;
    // handle final rounding
    if (DestVT == MVT::f64) {
      // do nothing
      Result = Sub;
    } else if (DestVT.bitsLT(MVT::f64)) {
      Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                           DAG.getIntPtrConstant(0));
    } else if (DestVT.bitsGT(MVT::f64)) {
      Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
    }
    return Result;
  }
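  // Illustrative walk-through of the i32 path above (not part of the original
  // source): writing 0x43300000 into the high word makes the buffer read back
  // as the double 2^52 + bits, which is exact because every 32-bit value fits
  // in the 52-bit mantissa.  For unsigned input, subtracting 2^52
  // (0x4330000000000000) leaves (double)x.  For signed input, x was first
  // XORed with 0x80000000, so subtracting 2^52 + 2^31 (0x4330000080000000)
  // restores the sign: x = -1 maps to 0x7FFFFFFF, and
  // (2^52 + 0x7FFFFFFF) - (2^52 + 2^31) = -1.0.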
  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
  SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);

  SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()),
                                 Op0, DAG.getConstant(0, Op0.getValueType()),
                                 ISD::SETLT);
  SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
  SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(),
                                  SignSet, Four, Zero);

  // If the sign bit of the integer is set, the large number will be treated
  // as a negative number.  To counteract this, the dynamic code adds an
  // offset depending on the data type.
  uint64_t FF;
  switch (Op0.getValueType().getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unsupported integer type!");
  case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
  case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
  case MVT::i32: FF = 0x4F800000ULL; break;  // 2^32 (as a float)
  case MVT::i64: FF = 0x5F800000ULL; break;  // 2^64 (as a float)
  }
  if (TLI.isLittleEndian()) FF <<= 32;
  Constant *FudgeFactor = ConstantInt::get(
                                  Type::getInt64Ty(*DAG.getContext()), FF);

  SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset);
  Alignment = std::min(Alignment, 4u);
  SDValue FudgeInReg;
  if (DestVT == MVT::f32)
    FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
                             PseudoSourceValue::getConstantPool(), 0,
                             false, Alignment);
  else {
    FudgeInReg =
      LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
                                DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                MVT::f32, false, Alignment));
  }

  return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
}
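// Illustrative example (not part of the original source): converting the
// unsigned i32 value 0xFFFFFFFF with only SINT_TO_FP available, the
// SINT_TO_FP above yields -1.0; the value is negative as a signed integer, so
// CstOffset selects the second pool entry, the float 2^32 (0x4F800000), and
// the final FADD gives -1.0 + 4294967296.0 = 4294967295.0.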
/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
/// *INT_TO_FP operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
/// operation that takes a larger input.
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
                                                    EVT DestVT,
                                                    bool isSigned,
                                                    DebugLoc dl) {
  // First step, figure out the appropriate *INT_TO_FP operation to use.
  EVT NewInTy = LegalOp.getValueType();

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (1) {
    NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
    assert(NewInTy.isInteger() && "Ran out of possibilities!");

    // If the target supports SINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) {
      OpToUse = ISD::SINT_TO_FP;
      break;
    }
    if (isSigned) continue;

    // If the target supports UINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) {
      OpToUse = ISD::UINT_TO_FP;
      break;
    }

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.  Zero extend our input to the
  // desired type then run the operation on it.
  return DAG.getNode(OpToUse, dl, DestVT,
                     DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                                 dl, NewInTy, LegalOp));
}

/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
/// FP_TO_*INT operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
/// operation that returns a larger result.
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
                                                    EVT DestVT,
                                                    bool isSigned,
                                                    DebugLoc dl) {
  // First step, figure out the appropriate FP_TO*INT operation to use.
  EVT NewOutTy = DestVT;

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (1) {
    NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
    assert(NewOutTy.isInteger() && "Ran out of possibilities!");

    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
      OpToUse = ISD::FP_TO_SINT;
      break;
    }

    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
      OpToUse = ISD::FP_TO_UINT;
      break;
    }

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.
  SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);

  // Truncate the result of the extended FP_TO_*INT operation to the desired
  // size.
  return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
}
/// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
///
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
  EVT VT = Op.getValueType();
  EVT SHVT = TLI.getShiftAmountTy();
  SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unhandled Expand type in BSWAP!");
  case MVT::i16:
    Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  case MVT::i32:
    Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT));
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
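  // Illustrative example (not part of the original source): for Op = 0x12345678
  // the i32 sequence above computes
  //   Tmp4 = Op << 24              = 0x78000000
  //   Tmp3 = (Op << 8) & 0xFF0000  = 0x00560000
  //   Tmp2 = (Op >> 8) & 0x00FF00  = 0x00003400
  //   Tmp1 =  Op >> 24             = 0x00000012
  // and ORs them together into 0x78563412.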
  case MVT::i64:
    Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT));
    Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT));
    Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT));
    Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT));
    Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT));
    Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT));
    Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT));
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
    Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
    return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
  }
}
/// ExpandBitCount - Expand the specified bitcount instruction into operations.
///
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
                                             DebugLoc dl) {
  switch (Opc) {
  default: llvm_unreachable("Cannot expand this yet!");
  case ISD::CTPOP: {
    static const uint64_t mask[6] = {
      0x5555555555555555ULL, 0x3333333333333333ULL,
      0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
      0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
    };
    EVT VT = Op.getValueType();
    EVT ShVT = TLI.getShiftAmountTy();
    unsigned len = VT.getSizeInBits();
    for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
      //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
      unsigned EltSize = VT.isVector() ?
        VT.getVectorElementType().getSizeInBits() : len;
      SDValue Tmp2 = DAG.getConstant(APInt(EltSize, mask[i]), VT);
      SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
      Op = DAG.getNode(ISD::ADD, dl, VT,
                       DAG.getNode(ISD::AND, dl, VT, Op, Tmp2),
                       DAG.getNode(ISD::AND, dl, VT,
                                   DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3),
                                   Tmp2));
    }
    return Op;
  }
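  // Illustrative example of the loop above (not part of the original source),
  // for an i8 value x = 0b11010110 with five set bits; x is the running value:
  //   i=0: (x & 0x55) + ((x>>1) & 0x55) = 0x54 + 0x41 = 0x95  (counts 2,1,1,1 per pair)
  //   i=1: (x & 0x33) + ((x>>2) & 0x33) = 0x11 + 0x21 = 0x32  (counts 3,2 per nibble)
  //   i=2: (x & 0x0F) + ((x>>4) & 0x0F) = 0x02 + 0x03 = 0x05  = popcount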
  case ISD::CTLZ: {
    // for now, we do this:
    // x = x | (x >> 1);
    // x = x | (x >> 2);
    // ...
    // x = x | (x >>16);
    // x = x | (x >>32); // for 64-bit input
    // return popcount(~x);
    //
    // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
    EVT VT = Op.getValueType();
    EVT ShVT = TLI.getShiftAmountTy();
    unsigned len = VT.getSizeInBits();
    for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
      SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
      Op = DAG.getNode(ISD::OR, dl, VT, Op,
                       DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3));
    }
    Op = DAG.getNOT(dl, Op, VT);
    return DAG.getNode(ISD::CTPOP, dl, VT, Op);
  }
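  // Illustrative example (not part of the original source): for the i32 value
  // 0x00010000 the OR-with-shifts loop above smears the top set bit downward,
  // giving 0x0001FFFF; the complement 0xFFFE0000 has 15 set bits, so the CTPOP
  // returns the 15 leading zeros of the original value.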
  case ISD::CTTZ: {
    // for now, we use: { return popcount(~x & (x - 1)); }
    // unless the target has ctlz but not ctpop, in which case we use:
    // { return 32 - nlz(~x & (x-1)); }
    // see also http://www.hackersdelight.org/HDcode/ntz.cc
    EVT VT = Op.getValueType();
    SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getNOT(dl, Op, VT),
                               DAG.getNode(ISD::SUB, dl, VT, Op,
                                           DAG.getConstant(1, VT)));
    // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
    if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
        TLI.isOperationLegalOrCustom(ISD::CTLZ, VT))
      return DAG.getNode(ISD::SUB, dl, VT,
                         DAG.getConstant(VT.getSizeInBits(), VT),
                         DAG.getNode(ISD::CTLZ, dl, VT, Tmp3));
    return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3);
  }
  }
}
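// Illustrative example (not part of the original source): for x = 0b10100,
// x - 1 = 0b10011 and ~x & (x - 1) = 0b00011, which has exactly as many set
// bits as x has trailing zeros, so CTPOP (or width minus CTLZ) returns 2.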
void SelectionDAGLegalize::ExpandNode(SDNode *Node,
                                      SmallVectorImpl<SDValue> &Results) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  switch (Node->getOpcode()) {
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
    break;
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
  case ISD::FRAME_TO_ARGS_OFFSET:
    Results.push_back(DAG.getConstant(0, Node->getValueType(0)));
    break;
  case ISD::FLT_ROUNDS_:
    Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
    break;
  case ISD::EH_RETURN:
  case ISD::DBG_LABEL:
  case ISD::MEMBARRIER:
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::DBG_STOPPOINT:
    Results.push_back(ExpandDBG_STOPPOINT(Node));
    break;
  case ISD::DYNAMIC_STACKALLOC:
    ExpandDYNAMIC_STACKALLOC(Node, Results);
    break;
  case ISD::MERGE_VALUES:
    for (unsigned i = 0; i < Node->getNumValues(); i++)
      Results.push_back(Node->getOperand(i));
    break;
  case ISD::UNDEF: {
    EVT VT = Node->getValueType(0);
    if (VT.isInteger())
      Results.push_back(DAG.getConstant(0, VT));
    else if (VT.isFloatingPoint())
      Results.push_back(DAG.getConstantFP(0, VT));
    else
      llvm_unreachable("Unknown value type!");
    break;
  }
  case ISD::TRAP: {
    // If this operation is not supported, lower it to 'abort()' call
    TargetLowering::ArgListTy Args;
    std::pair<SDValue, SDValue> CallResult =
      TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
                      false, false, false, false, 0, CallingConv::C, false,
                      /*isReturnValueUsed=*/true,
                      DAG.getExternalSymbol("abort", TLI.getPointerTy()),
                      Args, DAG, dl);
    Results.push_back(CallResult.second);
    break;
  }
  case ISD::BIT_CONVERT:
    Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_EXTEND:
    Tmp1 = EmitStackConvert(Node->getOperand(0),
                            Node->getOperand(0).getValueType(),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::SIGN_EXTEND_INREG: {
    // NOTE: we could fall back on load/store here too for targets without
    // SAR.  However, it is doubtful that any exist.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    unsigned BitsDiff = Node->getValueType(0).getSizeInBits() -
                        ExtraVT.getSizeInBits();
    SDValue ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy());
    Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
                       Node->getOperand(0), ShiftCst);
    Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_ROUND_INREG: {
    // The only way we can lower this is to turn it into a TRUNCSTORE,
    // EXTLOAD pair, targeting a temporary location (a stack slot).

    // NOTE: there is a choice here between constantly creating new stack
    // slots and always reusing the same one.  We currently always create
    // new ones, as reuse may inhibit scheduling.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
                                Node->getOperand(0), Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_TO_UINT: {
    SDValue True, False;
    EVT VT =  Node->getOperand(0).getValueType();
    EVT NVT = Node->getValueType(0);
    const uint64_t zero[] = {0, 0};
    APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
    APInt x = APInt::getSignBit(NVT.getSizeInBits());
    (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
    Tmp1 = DAG.getConstantFP(apf, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
                        Node->getOperand(0),
                        Tmp1, ISD::SETLT);
    True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
    False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
                        DAG.getNode(ISD::FSUB, dl, VT,
                                    Node->getOperand(0), Tmp1));
    False = DAG.getNode(ISD::XOR, dl, NVT, False,
                        DAG.getConstant(x, NVT));
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VAARG: {
    const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0);
    // Increment the pointer, VAList, to the next vaarg
    Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                       DAG.getConstant(TLI.getTargetData()->
                                       getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                       TLI.getPointerTy()));
    // Store the incremented VAList to the legalized pointer
    Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
    // Load the actual argument out of the pointer VAList
    Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::VACOPY: {
    // This defaults to loading a pointer from the input and storing it to the
    // output, returning the chain.
    const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
    const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
    Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
                       Node->getOperand(2), VS, 0);
    Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), VD, 0);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
      // This must be an access of the only element.  Return it.
      Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
                         Node->getOperand(0));
    else
      Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
    Results.push_back(Tmp1);
    break;
  case ISD::EXTRACT_SUBVECTOR:
    Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::CONCAT_VECTORS: {
    Results.push_back(ExpandVectorBuildThroughStack(Node));
    break;
  }
  case ISD::SCALAR_TO_VECTOR:
    Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
    break;
  case ISD::INSERT_VECTOR_ELT:
    Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
                                              Node->getOperand(1),
                                              Node->getOperand(2), dl));
    break;
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
    unsigned NumElems = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Mask[i] < 0) {
        Ops.push_back(DAG.getUNDEF(EltVT));
        continue;
      }
      unsigned Idx = Mask[i];
      if (Idx < NumElems)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(0),
                                  DAG.getIntPtrConstant(Idx)));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(1),
                                  DAG.getIntPtrConstant(Idx - NumElems)));
    }
    Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    EVT OpTy = Node->getOperand(0).getValueType();
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
      // 1 -> Hi
      Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
                         DAG.getConstant(OpTy.getSizeInBits()/2,
                                         TLI.getShiftAmountTy()));
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
    } else {
      // 0 -> Lo
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
                         Node->getOperand(0));
    }
    Results.push_back(Tmp1);
    break;
  }
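  // Illustrative example (not part of the original source): extracting the
  // halves of the i64 value 0x0123456789ABCDEF into i32, element 1 (Hi) is
  // (Op >> 32) truncated = 0x01234567, and element 0 (Lo) is the plain
  // truncate = 0x89ABCDEF.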
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) ->  Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    bool isLegal = false;
    for (TargetLowering::legal_fpimm_iterator I = TLI.legal_fpimm_begin(),
           E = TLI.legal_fpimm_end(); I != E; ++I) {
      if (CFP->isExactlyValue(*I)) {
        isLegal = true;
        break;
      }
    }
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (isLegal)
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
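  // Illustrative example (not part of the original source): the expansion
  // above computes a - b as a + (~b + 1).  For i8 operands 7 and 3, ~3 = 0xFC,
  // 0xFC + 1 = 0xFD, and 7 + 0xFD wraps to 0x04 = 4.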
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X-X/Y*Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned) {
      Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    } else {
      Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    }
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SDIV:
  case ISD::UDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms. If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false, RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    //   LHSSign -> LHS >= 0
    //   RHSSign -> RHS >= 0
    //   SumSign -> Sum >= 0
    //
    //   Add:
    //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    //   Sub:
    //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
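  // Illustrative example (not part of the original source): for i8 SADDO with
  // LHS = 100 and RHS = 50, the sum wraps to -106.  Both sign tests above are
  // true, so SignsMatch is true, while SumSign differs from LHSSign, so
  // SumSignNE is true and the overflow flag is set.  For 100 + (-50) the signs
  // differ and no overflow is reported.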
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode() == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2))) {
      EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // FIXME: We should be able to fall back to a libcall with an illegal
      // type in some cases.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      llvm_unreachable("Don't know how to expand this operation yet!");
    }
    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy()));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
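  // Illustrative example (not part of the original source): building an i32
  // from the i16 halves Lo = 0x1234 and Hi = 0x5678, the zero-extended Lo is
  // 0x00001234, Hi shifted left by 16 is 0x56780000, and the OR yields
  // 0x56781234.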
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize();
    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                PseudoSourceValue::getJumpTable(), 0, MemVT);
    Addr = LD;
    if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // Node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp2,
                         DAG.getConstant(0, Tmp2.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
= Node
->getOperand(0);
2863 Tmp2
= Node
->getOperand(1);
2864 Tmp3
= Node
->getOperand(2);
2865 LegalizeSetCCCondCode(Node
->getValueType(0), Tmp1
, Tmp2
, Tmp3
, dl
);
2867 // If we expanded the SETCC into an AND/OR, return the new node
2868 if (Tmp2
.getNode() == 0) {
2869 Results
.push_back(Tmp1
);
2873 // Otherwise, SETCC for the given comparison type must be completely
2874 // illegal; expand it into a SELECT_CC.
2875 EVT VT
= Node
->getValueType(0);
2876 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, VT
, Tmp1
, Tmp2
,
2877 DAG
.getConstant(1, VT
), DAG
.getConstant(0, VT
), Tmp3
);
2878 Results
.push_back(Tmp1
);
2881 case ISD::SELECT_CC
: {
2882 Tmp1
= Node
->getOperand(0); // LHS
2883 Tmp2
= Node
->getOperand(1); // RHS
2884 Tmp3
= Node
->getOperand(2); // True
2885 Tmp4
= Node
->getOperand(3); // False
2886 SDValue CC
= Node
->getOperand(4);
2888 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp1
.getValueType()),
2889 Tmp1
, Tmp2
, CC
, dl
);
2891 assert(!Tmp2
.getNode() && "Can't legalize SELECT_CC with legal condition!");
2892 Tmp2
= DAG
.getConstant(0, Tmp1
.getValueType());
2893 CC
= DAG
.getCondCode(ISD::SETNE
);
2894 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp2
,
2896 Results
.push_back(Tmp1
);
2900 Tmp1
= Node
->getOperand(0); // Chain
2901 Tmp2
= Node
->getOperand(2); // LHS
2902 Tmp3
= Node
->getOperand(3); // RHS
2903 Tmp4
= Node
->getOperand(1); // CC
2905 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp2
.getValueType()),
2906 Tmp2
, Tmp3
, Tmp4
, dl
);
2907 LastCALLSEQ_END
= DAG
.getEntryNode();
2909 assert(!Tmp3
.getNode() && "Can't legalize BR_CC with legal condition!");
2910 Tmp3
= DAG
.getConstant(0, Tmp2
.getValueType());
2911 Tmp4
= DAG
.getCondCode(ISD::SETNE
);
2912 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp4
, Tmp2
,
2913 Tmp3
, Node
->getOperand(4));
2914 Results
.push_back(Tmp1
);
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}
2932 void SelectionDAGLegalize::PromoteNode(SDNode
*Node
,
2933 SmallVectorImpl
<SDValue
> &Results
) {
2934 EVT OVT
= Node
->getValueType(0);
2935 if (Node
->getOpcode() == ISD::UINT_TO_FP
||
2936 Node
->getOpcode() == ISD::SINT_TO_FP
||
2937 Node
->getOpcode() == ISD::SETCC
) {
2938 OVT
= Node
->getOperand(0).getValueType();
2940 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), OVT
);
2941 DebugLoc dl
= Node
->getDebugLoc();
2942 SDValue Tmp1
, Tmp2
, Tmp3
;
2943 switch (Node
->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
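  // Worked example (illustrative, not part of the lowering): promoting an i8
  // bit count to i32.
  //  - CTPOP needs no fixup: the zero extension adds only zero bits.
  //  - CTLZ: the zero extension adds 24 leading zeros, so the SUB above
  //    removes NVT.getSizeInBits() - OVT.getSizeInBits() = 32 - 8 = 24.
  //  - CTTZ: the trailing-zero count is unchanged unless the input is 0, in
  //    which case the i32 count is 32; the SETCC/SELECT above clamps that
  //    case back to 8, the result expected for the original i8 type.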
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy()));
    Results.push_back(Tmp1);
    break;
  }
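  // Illustrative sketch: byte-swapping an i16 value when only i32 BSWAP is
  // legal.  For 0x1234: zero extend -> 0x00001234, i32 bswap -> 0x34120000,
  // then srl by DiffBits (16) -> 0x00003412, which is the i16 bswap result
  // sitting in the low half of the wider register.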
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
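  // Note: the PromoteLegalFP_TO_INT and PromoteLegalINT_TO_FP helpers used
  // here (defined elsewhere in this file) perform the conversion through a
  // wider legal integer type; e.g. an i16 sint_to_fp may be handled by
  // sign-extending the operand to i32 first.  The boolean argument selects
  // the signed variant of the conversion.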
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (OVT.isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      llvm_report_error("Cannot promote logic operation");
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
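  // Illustrative sketch: an i16 AND on a target whose only legal integer type
  // is i32.  Both operands are ANY_EXTENDed (the high bits are don't-care
  // bits for AND/OR/XOR), the operation is performed at i32, and the result
  // is truncated back to i16.  Vector values are instead bit-converted to the
  // equivalently sized legal vector type and back.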
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
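  // Note: unlike TRUNCATE, FP_ROUND carries a second operand, hence the split
  // above; the getIntPtrConstant(0) flag is the conservative setting,
  // indicating the narrowing is not known to preserve the value exactly.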
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
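  // Rough sketch of the mask conversion: if NVT uses narrower elements (and
  // so more of them) than OVT, each original mask index m is expanded to the
  // indices covering the same bytes, e.g. {2*m, 2*m+1} when the element count
  // doubles, so the shuffle still moves the same data after the inputs are
  // bit-converted to NVT.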
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}

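// Illustrative sketch: comparing two i8 values when only i32 SETCC is legal.
// For signed predicates (e.g. SETLT) both operands are sign-extended; for
// unsigned and equality predicates they are zero-extended.  Either extension
// preserves the ordering of the original i8 values, so the wider comparison
// returns the same result.
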
// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize(bool TypesNeedLegalizing,
                            CodeGenOpt::Level OptLevel) {
  /// run - This is the main entry point to this class.
  ///
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();
}