//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support a 'brcc'
/// instruction, this will attempt to merge setcc and brc instructions into
/// brcc's.
///
//===----------------------------------------------------------------------===//
namespace {
class VISIBILITY_HIDDEN SelectionDAGLegalize {
  TargetLowering &TLI;
  SelectionDAG &DAG;
  CodeGenOpt::Level OptLevel;

  // Libcall insertion helpers.

  /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
  /// legalized.  We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SDValue LastCALLSEQ_END;

  /// IsLegalizingCall - This member is used *only* for purposes of providing
  /// helpful assertions that a libcall isn't created while another call is
  /// being legalized (which could lead to non-serialized call sequences).
  bool IsLegalizingCall;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use.  This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));
  }

public:
  SelectionDAGLegalize(SelectionDAG &DAG, CodeGenOpt::Level ol);

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal or we need to expand it into multiple registers of
  /// smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return
      (LegalizeAction)ValueTypeActions.getTypeAction(*DAG.getContext(), VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);
  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order of result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type.  e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;
  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32, RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  SDValue ExpandDBG_STOPPOINT(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
};
}
/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order of result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  EVT EltVT = NVT.getVectorElementType();
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}
SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
                                           CodeGenOpt::Level ol)
  : TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol),
    ValueTypeActions(TLI.getValueTypeActions()) {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");
}
void SelectionDAGLegalize::LegalizeDAG() {
  LastCALLSEQ_END = DAG.getEntryNode();
  IsLegalizingCall = false;

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves).  Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph.  In
  // practice however, this causes us to run out of stack space on large basic
  // blocks.  To avoid this problem, compute an ordering of the nodes where
  // each node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = prior(DAG.allnodes_end()); I != next(E); ++I)
    LegalizeOp(SDValue(I, 0));
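  // (Nodes created during legalization are appended after E and are legalized
  // by the LegalizeOp calls that create them, so this loop does not need to
  // visit them again.)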
  // Finally, it's possible the root changed.  Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();
}
/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
static SDNode *FindCallEndFromCallStart(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_END)
    return Node;
  if (Node->use_empty())
    return 0;   // No CallSeqEnd

  // The chain is usually at the end.
  SDValue TheChain(Node, Node->getNumValues()-1);
  if (TheChain.getValueType() != MVT::Other) {
    // Sometimes it's at the beginning.
    TheChain = SDValue(Node, 0);
    if (TheChain.getValueType() != MVT::Other) {
      // Otherwise, hunt for it.
      for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
        if (Node->getValueType(i) == MVT::Other) {
          TheChain = SDValue(Node, i);
          break;
        }

      // Otherwise, we walked into a node without a chain.
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {

    // Make sure to only follow users of our token chain.
    SDNode *User = *UI;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User))
          return Result;
  }
  return 0;
}
/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  assert(Node && "Didn't find callseq_start for a call??");
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;

  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCallEnd(Node->getOperand(0).getNode());
}
/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
/// see if any uses can reach Dest.  If no dest operands can get to dest,
/// legalize them, legalize ourself, and return false, otherwise, return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo.  This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has already been legalized, then it
  // cannot reach Dest.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized.  Check and legalize all
  // operands.  If none lead to Dest, then we can legalize this node.
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=     // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}
/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as on
  // the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
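  // If we are not allowed to use a constant pool entry, emit the raw IEEE bit
  // pattern of the constant as an integer immediate of the same width.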
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }
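  // Note: decrementing SimpleTy below steps down to the next-smaller
  // floating-point MVT (e.g. f64 -> f32), relying on the MVT enum ordering.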
  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // the smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
      break;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend)
    return DAG.getExtLoad(ISD::EXTLOAD, dl,
                          OrigVT, DAG.getEntryNode(),
                          CPIdx, PseudoSourceValue::getConstantPool(),
                          0, VT, false, Alignment);
  return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                     PseudoSourceValue::getConstantPool(), 0, false, Alignment);
}
/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static
SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                             const TargetLowering &TLI) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  int SVOffset = ST->getSrcValueOffset();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val);
      return DAG.getStore(Chain, dl, Result, Ptr, ST->getSrcValue(),
                          SVOffset, ST->isVolatile(), Alignment);
    } else {
      // Do a (aligned) store to a stack slot, then copy from the stack slot
      // to the final destination using (unaligned) integer loads and stores.
      EVT StoredVT = ST->getMemoryVT();
      EVT RegVT =
        TLI.getRegisterType(*DAG.getContext(),
                            EVT::getIntegerVT(*DAG.getContext(),
                                              StoredVT.getSizeInBits()));
      unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
      unsigned RegBytes = RegVT.getSizeInBits() / 8;
      unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

      // Make sure the stack slot is also aligned for the register type.
      SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

      // Perform the original store, only redirected to the stack slot.
      SDValue Store = DAG.getTruncStore(Chain, dl,
                                        Val, StackPtr, NULL, 0, StoredVT);
      SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
      SmallVector<SDValue, 8> Stores;
      unsigned Offset = 0;

      // Do all but one copies using the full register width.
      for (unsigned i = 1; i < NumRegs; i++) {
        // Load one integer register's worth from the stack slot.
        SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr, NULL, 0);
        // Store it to the final location.  Remember the store.
        Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                      ST->getSrcValue(), SVOffset + Offset,
                                      ST->isVolatile(),
                                      MinAlign(ST->getAlignment(), Offset)));
        // Increment the pointers.
        Offset += RegBytes;
        StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                               Increment);
        Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      }

      // The last store may be partial.  Do a truncating store.  On big-endian
      // machines this requires an extending load from the stack slot to ensure
      // that the bits are in the right place.
      EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                    8 * (StoredBytes - Offset));

      // Load from the stack slot.
      SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                    NULL, 0, MemVT);

      Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                         ST->getSrcValue(), SVOffset + Offset,
                                         MemVT, ST->isVolatile(),
                                         MinAlign(ST->getAlignment(), Offset)));
      // The order of the stores doesn't matter - say it with a TokenFactor.
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                         Stores.size());
    }
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
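  // (Integer MVTs are enumerated in increasing size, so SimpleTy - 1 yields
  // the next narrower integer type, e.g. i32 -> i16.)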
  EVT NewStoredVT =
    (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1);
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;
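  // For example, an unaligned i32 store becomes two i16 truncating stores:
  // the low half at Ptr and the high half (Val >> 16) at Ptr+2, with the two
  // halves swapped on big-endian targets.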
  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
                             ST->getSrcValue(), SVOffset, NewStoredVT,
                             ST->isVolatile(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
                             ST->getSrcValue(), SVOffset + IncrementSize,
                             NewStoredVT, ST->isVolatile(), Alignment);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}
/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static
SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                            const TargetLowering &TLI) {
  int SVOffset = LD->getSrcValueOffset();
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getSrcValue(),
                                    SVOffset, LD->isVolatile(),
                                    LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      SDValue Ops[] = { Result, Chain };
      return DAG.getMergeValues(Ops, 2, dl);
    } else {
      // Copy the value to a (aligned) stack slot using (unaligned) integer
      // loads and stores, then do a (aligned) load from the stack slot.
      EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
      unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
      unsigned RegBytes = RegVT.getSizeInBits() / 8;
      unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

      // Make sure the stack slot is also aligned for the register type.
      SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

      SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
      SmallVector<SDValue, 8> Stores;
      SDValue StackPtr = StackBase;
      unsigned Offset = 0;

      // Do all but one copies using the full register width.
      for (unsigned i = 1; i < NumRegs; i++) {
        // Load one integer register's worth from the original location.
        SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr, LD->getSrcValue(),
                                   SVOffset + Offset, LD->isVolatile(),
                                   MinAlign(LD->getAlignment(), Offset));
        // Follow the load with a store to the stack slot.  Remember the store.
        Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                      NULL, 0));
        // Increment the pointers.
        Offset += RegBytes;
        Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
        StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                               Increment);
      }

      // The last copy may be partial.  Do an extending load.
      EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                    8 * (LoadedBytes - Offset));
      SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                    LD->getSrcValue(), SVOffset + Offset,
                                    MemVT, LD->isVolatile(),
                                    MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot.  Remember the store.
      // On big-endian machines this requires a truncating store to ensure
      // that the bits end up in the right place.
      Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                         NULL, 0, MemVT));

      // The order of the stores doesn't matter - say it with a TokenFactor.
      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                               Stores.size());

      // Finally, perform the original load only redirected to the stack slot.
      Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                            NULL, 0, LoadedVT);

      // Callers expect a MERGE_VALUES node.
      SDValue Ops[] = { Load, TF };
      return DAG.getMergeValues(Ops, 2, dl);
    }
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;
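  // For example, an unaligned i32 load becomes two half-width loads (from Ptr
  // and Ptr+2) that are recombined below as (Hi << 16) | Lo, with the roles of
  // the two halves swapped on big-endian targets.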
  // Load the value in two parts
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getSrcValue(),
                        SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
                        MinAlign(Alignment, IncrementSize));
  }

  // Aggregate the two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}
/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction.  In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction), then
  // permute it into place, if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            PseudoSourceValue::getFixedStack(SPFI), 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
                     DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2,
                         PseudoSourceValue::getFixedStack(SPFI), 0, EltVT);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     PseudoSourceValue::getFixedStack(SPFI), 0);
}
SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // element 0 of the RHS.
      SmallVector<int, 8> ShufOps;
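      // e.g. for a 4-element vector with insertion index 2, the mask is
      // <0, 1, 4, 3>: index 4 selects element 0 of ScVec (the new value).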
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner!  Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner.  This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  int SVOffset = ST->getSrcValueOffset();
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        getTypeAction(MVT::i32) == Legal) {
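      // e.g. 'store float 1.0' becomes 'store i32 0x3F800000' (the IEEE-754
      // single-precision bit pattern of 1.0f).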
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                                      bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                          SVOffset, isVolatile, Alignment);
    } else if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (getTypeAction(MVT::i64) == Legal) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                  zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                            SVOffset, isVolatile, Alignment);
      } else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores.  If the target supports neither 32- nor 64-bits, this
        // xform is certainly not worth it.
        const APInt &IntVal = CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(),
                          SVOffset, isVolatile, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4,
                          isVolatile, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue();
}
/// LegalizeOp - We know that the specified value has a legal type, and
/// that its operands are legal.  Now ensure that the operation itself
/// is legal, recursively ensuring that the operands' operations remain
/// legal.
SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
  if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return Op;

  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(getTypeAction(Node->getValueType(i)) == Legal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  SDValue Result = Op;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode.
  TargetLowering::LegalizeAction Action;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::VAARG:
  case ISD::STACKSAVE:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
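    // SELECT_CC and SETCC carry their condition code as the last operand
    // (operands 4 and 2 respectively); BR_CC carries it as operand 1 and
    // compares operands 2 and 3.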
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly.  LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary.  These nodes have special properties
    // dealing with the recursive nature of legalization.  Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::BUILD_VECTOR:
    // A weird case: legalization for BUILD_VECTOR never legalizes the
    // operands!
    // FIXME: This really sucks... changing it isn't semantically incorrect,
    // but it massively pessimizes the code for floating-point BUILD_VECTORs
    // because ConstantFP operands get legalized into constant pool loads
    // before the BUILD_VECTOR code can see them.  It doesn't usually bite,
    // though, because BUILD_VECTORS usually get lowered into other nodes
    // which get legalized properly.
    SimpleFinishLegalizing = false;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(),
                                      Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops, ResultVals;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(LegalizeOp(Node->getOperand(i)));
    switch (Node->getOpcode()) {
    default: break;
    case ISD::BR:
    case ISD::BRIND:
    case ISD::BR_JT:
    case ISD::BR_CC:
    case ISD::BRCOND:
      // Branches tweak the chain to include LastCALLSEQ_END
      Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
                           LastCALLSEQ_END);
      Ops[0] = LegalizeOp(Ops[0]);
      LastCALLSEQ_END = DAG.getEntryNode();
      break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector())
        Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[1]));
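      // (getShiftAmountOperand adjusts the amount operand to the target's
      // preferred shift-amount type, zero-extending or truncating as needed.)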
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[2].getValueType().isVector())
        Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[2]));
      break;
    }

    Result = DAG.UpdateNodeOperands(Result.getValue(0), Ops.data(),
                                    Ops.size());
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }
    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }
  switch (Node->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: llvm_unreachable("This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALLTHROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);

    // Recursively Legalize all of the inputs of the call end that do not lead
    // to this call start.  This ensures that any libcalls that need be
    // inserted are inserted *before* the CALLSEQ_START.
    {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
    for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
      LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                   NodesLeadingTo);
    }

    // Now that we legalized all of the inputs (which may have inserted
    // libcalls) create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call, to ensure that this call starts after the last
    // call ended.
    if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, LastCALLSEQ_END);
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)  // If this has a flag result, remember it.
      AddLegalizedOperand(Op.getValue(1), Result.getValue(1));

    // Now that the callseq_start and all of the non-call nodes above this call
    // sequence have been legalized, legalize the call itself.  During this
    // process, no libcalls can/will be inserted, guaranteeing that no calls
    // can overlap.
    assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!");
    // Note that we are selecting this call!
    LastCALLSEQ_END = SDValue(CallEnd, 0);
    IsLegalizingCall = true;

    // Legalize the call, starting from the CALLSEQ_END.
    LegalizeOp(LastCALLSEQ_END);
    assert(!IsLegalizingCall && "CALLSEQ_END should have cleared this!");
    return Result;
  }
  case ISD::CALLSEQ_END:
    // If the CALLSEQ_START node hasn't been legalized first, legalize it.
    // This will cause this node to be legalized as well as handling libcalls
    // right.
    if (LastCALLSEQ_END.getNode() != Node) {
      LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0));
      DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
      assert(I != LegalizedNodes.end() &&
             "Legalizing the call start should have legalized this node!");
      return I->second;
    }

    // Otherwise, the call start has been legalized and everything is going
    // according to plan.  Just legalize ourselves normally here.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
    // Do not try to legalize the target-specific arguments (#1+), except for
    // an optional flag input.
    if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){
      if (Tmp1 != Node->getOperand(0)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
      }
    } else {
      Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
      if (Tmp1 != Node->getOperand(0) ||
          Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Ops.back() = Tmp2;
        Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size());
      }
    }
    assert(IsLegalizingCall && "Call sequence imbalance between start/end?");
    // This finishes up call legalization.
    IsLegalizingCall = false;

    // If the CALLSEQ_END node has a flag, remember that we legalized it.
    AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
    if (Node->getNumValues() == 2)
      AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
    return Result.getValue(Op.getResNo());
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LegalizeOp(LD->getChain());   // Legalize the chain.
    Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
      Tmp3 = Result.getValue(0);
      Tmp4 = Result.getValue(1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
          const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment){
            Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                         DAG, TLI);
            Tmp3 = Result.getOperand(0);
            Tmp4 = Result.getOperand(1);
            Tmp3 = LegalizeOp(Tmp3);
            Tmp4 = LegalizeOp(Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = LegalizeOp(Tmp1);
          Tmp4 = LegalizeOp(Tmp1.getValue(1));
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);
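        // e.g. a v4f32 load may be promoted to a v4i32 load followed by a
        // bit_convert back to v4f32, if that is the target's promotion choice.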
        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
                           LD->getSrcValueOffset(),
                           LD->isVolatile(), LD->getAlignment());
        Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1));
        Tmp4 = LegalizeOp(Tmp1.getValue(1));
        break;
      }
      }

      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      AddLegalizedOperand(SDValue(Node, 0), Tmp3);
      AddLegalizedOperand(SDValue(Node, 1), Tmp4);
      return Op.getResNo() ? Tmp4 : Tmp3;
    }
    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    int SVOffset = LD->getSrcValueOffset();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8.  This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero.  It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined.  It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way.  A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                              Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
                              NVT, isVolatile, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(SrcVT.isExtended() && !SrcVT.isVector() &&
             "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
                            Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
                            Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset + IncrementSize,
                            ExtraVT, isVolatile,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
                            Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl,
                            Node->getValueType(0), Tmp1, Tmp2,
                            LD->getSrcValue(), SVOffset + IncrementSize,
                            ExtraVT, isVolatile,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: llvm_unreachable("This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
        Tmp1 = Result.getValue(0);
        Tmp2 = Result.getValue(1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(Result, DAG);
          if (Tmp3.getNode()) {
            Tmp1 = LegalizeOp(Tmp3);
            Tmp2 = LegalizeOp(Tmp3.getValue(1));
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
            const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment){
              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                           DAG, TLI);
              Tmp1 = Result.getOperand(0);
              Tmp2 = Result.getOperand(1);
              Tmp1 = LegalizeOp(Tmp1);
              Tmp2 = LegalizeOp(Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
        // f128 = EXTLOAD {f32,f64} too
        if ((SrcVT == MVT::f32 && (Node->getValueType(0) == MVT::f64 ||
                                   Node->getValueType(0) == MVT::f128)) ||
            (SrcVT == MVT::f64 && Node->getValueType(0) == MVT::f128)) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
                                     LD->getSrcValueOffset(),
                                     LD->isVolatile(), LD->getAlignment());
          Result = DAG.getNode(ISD::FP_EXTEND, dl,
                               Node->getValueType(0), Load);
          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Load.getValue(1));
          break;
        }
        assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
        Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                Tmp1, Tmp2, LD->getSrcValue(),
                                LD->getSrcValueOffset(), SrcVT,
                                LD->isVolatile(), LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT);
        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    AddLegalizedOperand(SDValue(Node, 0), Tmp1);
    AddLegalizedOperand(SDValue(Node, 1), Tmp2);
    return Op.getResNo() ? Tmp2 : Tmp1;
  }

  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = LegalizeOp(ST->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(ST->getBasePtr());  // Legalize the pointer.
    int SVOffset = ST->getSrcValueOffset();
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        Result = SDValue(OptStore, 0);
        break;
      }

      {
        Tmp3 = LegalizeOp(ST->getValue());
        Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp3, Tmp2,
                                        ST->getOffset());

        EVT VT = Tmp3.getValueType();
        switch (TLI.getOperationAction(ISD::STORE, VT)) {
        default: llvm_unreachable("This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                            DAG, TLI);
          }
          break;
        case TargetLowering::Custom:
          Tmp1 = TLI.LowerOperation(Result, DAG);
          if (Tmp1.getNode()) Result = Tmp1;
          break;
        case TargetLowering::Promote:
          assert(VT.isVector() && "Unknown legal promote case!");
          Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl,
                             TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
          Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getSrcValue(), SVOffset, isVolatile,
                                Alignment);
          break;
        }
        break;
      }
    } else {
      Tmp3 = LegalizeOp(ST->getValue());

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes.  For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                   SVOffset, NVT, isVolatile, Alignment);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
        assert(StVT.isExtended() && !StVT.isVector() &&
               "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                 SVOffset, RoundVT,
                                 isVolatile, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(RoundWidth,
                                           TLI.getShiftAmountTy()));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
                                 SVOffset + IncrementSize, ExtraVT, isVolatile,
                                 MinAlign(Alignment, IncrementSize));
        } else {
          // Big endian - avoid unaligned stores.
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
          // Store the top RoundWidth bits.
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(ExtraWidth,
                                           TLI.getShiftAmountTy()));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(),
                                 SVOffset, RoundVT, isVolatile, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
                                 SVOffset + IncrementSize, ExtraVT, isVolatile,
                                 MinAlign(Alignment, IncrementSize));
        }

        // The order of the stores doesn't matter.
        Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      } else {
1436 if (Tmp1
!= ST
->getChain() || Tmp3
!= ST
->getValue() ||
1437 Tmp2
!= ST
->getBasePtr())
1438 Result
= DAG
.UpdateNodeOperands(Result
, Tmp1
, Tmp3
, Tmp2
,
1441 switch (TLI
.getTruncStoreAction(ST
->getValue().getValueType(), StVT
)) {
1442 default: llvm_unreachable("This action is not supported yet!");
1443 case TargetLowering::Legal
:
1444 // If this is an unaligned store and the target doesn't support it,
1446 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1447 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1448 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1449 if (ST
->getAlignment() < ABIAlignment
)
1450 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1454 case TargetLowering::Custom
:
1455 Result
= TLI
.LowerOperation(Result
, DAG
);
1458 // TRUNCSTORE:i16 i32 -> STORE i16
1459 assert(isTypeLegal(StVT
) && "Do not know how to expand this store!");
1460 Tmp3
= DAG
.getNode(ISD::TRUNCATE
, dl
, StVT
, Tmp3
);
1461 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
1462 SVOffset
, isVolatile
, Alignment
);
1470 assert(Result
.getValueType() == Op
.getValueType() &&
1471 "Bad legalization!");
1473 // Make sure that the generated code is itself legal.
1475 Result
= LegalizeOp(Result
);
1477 // Note that LegalizeOp may be reentered even from single-use nodes, which
1478 // means that we always must cache transformed nodes.
1479 AddLegalizedOperand(Op
, Result
);
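
/// ExpandExtractFromVectorThroughStack - Lower an extract from a vector by
/// spilling the source vector to a stack temporary and loading the requested
/// element (or subvector) back out.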
SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();
  // Store the value to a temporary stack slot, then LOAD the returned part.
  SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, NULL, 0);

  // Add the offset to the index.
  unsigned EltSize =
      Vec.getValueType().getVectorElementType().getSizeInBits()/8;
  Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
                    DAG.getConstant(EltSize, Idx.getValueType()));

  if (Idx.getValueType().bitsGT(TLI.getPointerTy()))
    Idx = DAG.getNode(ISD::TRUNCATE, dl, TLI.getPointerTy(), Idx);
  else
    Idx = DAG.getNode(ISD::ZERO_EXTEND, dl, TLI.getPointerTy(), Idx);

  StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);

  if (Op.getValueType().isVector())
    return DAG.getLoad(Op.getValueType(), dl, Ch, StackPtr, NULL, 0);
  return DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
                        NULL, 0, Vec.getValueType().getVectorElementType());
}
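
/// ExpandVectorBuildThroughStack - Build a vector value by storing each
/// (non-undef) element to a stack temporary and loading the whole slot back
/// out as a vector.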
SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
  // We can't handle this case efficiently.  Allocate a sufficiently
  // aligned object on the stack, store each element into it, then load
  // the result as a vector.
  // Create the stack frame object.
  EVT VT = Node->getValueType(0);
  EVT OpVT = Node->getOperand(0).getValueType();
  DebugLoc dl = Node->getDebugLoc();
  SDValue FIPtr = DAG.CreateStackTemporary(VT);
  int FI = cast<FrameIndexSDNode>(FIPtr.getNode())->getIndex();
  const Value *SV = PseudoSourceValue::getFixedStack(FI);

  // Emit a store of each element to the stack slot.
  SmallVector<SDValue, 8> Stores;
  unsigned TypeByteSize = OpVT.getSizeInBits() / 8;
  // Store (in the right endianness) the elements to memory.
  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
    // Ignore undef elements.
    if (Node->getOperand(i).getOpcode() == ISD::UNDEF) continue;

    unsigned Offset = TypeByteSize*i;

    SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType());
    Idx = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, Idx);

    Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, Node->getOperand(i),
                                  Idx, SV, Offset));
  }

  SDValue StoreChain;
  if (!Stores.empty())    // Not all undef elements?
    StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             &Stores[0], Stores.size());
  else
    StoreChain = DAG.getEntryNode();

  // Result is a load from the stack slot.
  return DAG.getLoad(VT, dl, StoreChain, FIPtr, SV, 0);
}
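
/// ExpandFCOPYSIGN - Lower FCOPYSIGN by extracting the sign bit of the second
/// operand (via a bitcast, or a store/load through the stack when the integer
/// type is not legal) and selecting between FABS and its negation.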
SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1 = Node->getOperand(0);
  SDValue Tmp2 = Node->getOperand(1);
  assert((Tmp2.getValueType() == MVT::f32 ||
          Tmp2.getValueType() == MVT::f64) &&
         "Ugly special-cased code!");
  // Get the sign bit of the RHS.
  SDValue SignBit;
  EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
  if (isTypeLegal(IVT)) {
    SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
  } else {
    assert(isTypeLegal(TLI.getPointerTy()) &&
           (TLI.getPointerTy() == MVT::i32 ||
            TLI.getPointerTy() == MVT::i64) &&
           "Legal type for load?!");
    SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType());
    SDValue StorePtr = StackPtr, LoadPtr = StackPtr;
    SDValue Ch =
      DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0);
    if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian())
      LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(),
                            LoadPtr, DAG.getIntPtrConstant(4));
    SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(),
                             Ch, LoadPtr, NULL, 0, MVT::i32);
  }
  SignBit =
    DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
                 SignBit, DAG.getConstant(0, SignBit.getValueType()),
                 ISD::SETLT);
  // Get the absolute value of the result.
  SDValue AbsVal = DAG.getNode(ISD::FABS, dl, Tmp1.getValueType(), Tmp1);
  // Select between the nabs and abs value based on the sign bit of
  // the input.
  return DAG.getNode(ISD::SELECT, dl, AbsVal.getValueType(), SignBit,
                     DAG.getNode(ISD::FNEG, dl, AbsVal.getValueType(), AbsVal),
                     AbsVal);
}
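
/// ExpandDBG_STOPPOINT - Lower a DBG_STOPPOINT node to a DEBUG_LOC node or a
/// DBG_LABEL when the target and DwarfWriter support it; otherwise just
/// return the input chain.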
SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
  DebugLoc dl = Node->getDebugLoc();
  DwarfWriter *DW = DAG.getDwarfWriter();
  bool useDEBUG_LOC = TLI.isOperationLegalOrCustom(ISD::DEBUG_LOC,
                                                   MVT::Other);
  bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, MVT::Other);

  const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node);
  MDNode *CU_Node = DSP->getCompileUnit();
  if (DW && (useDEBUG_LOC || useLABEL)) {
    DICompileUnit CU(CU_Node);

    unsigned Line = DSP->getLine();
    unsigned Col = DSP->getColumn();

    if (OptLevel == CodeGenOpt::None) {
      // A bit self-referential to have DebugLoc on Debug_Loc nodes, but it
      // won't hurt anything.
      if (useDEBUG_LOC) {
        return DAG.getNode(ISD::DEBUG_LOC, dl, MVT::Other, Node->getOperand(0),
                           DAG.getConstant(Line, MVT::i32),
                           DAG.getConstant(Col, MVT::i32),
                           DAG.getSrcValue(CU.getNode()));
      } else {
        unsigned ID = DW->RecordSourceLine(Line, Col, CU);
        return DAG.getLabel(ISD::DBG_LABEL, dl, Node->getOperand(0), ID);
      }
    }
  }
  return Node->getOperand(0);
}
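
/// ExpandDYNAMIC_STACKALLOC - Lower a dynamic stack allocation by adjusting
/// the stack pointer register inside a CALLSEQ_START/CALLSEQ_END pair.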
void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
                                                    SmallVectorImpl<SDValue> &Results) {
  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
  assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
         " not tell us which reg is the stack pointer!");
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  SDValue Tmp1 = SDValue(Node, 0);
  SDValue Tmp2 = SDValue(Node, 1);
  SDValue Tmp3 = Node->getOperand(2);
  SDValue Chain = Tmp1.getOperand(0);

  // Chain the dynamic stack allocation so that it doesn't modify the stack
  // pointer when other instructions are using the stack.
  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  SDValue Size  = Tmp2.getOperand(1);
  SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
  Chain = SP.getValue(1);
  unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align > StackAlign)
    SP = DAG.getNode(ISD::AND, dl, VT, SP,
                     DAG.getConstant(-(uint64_t)Align, VT));
  Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size);       // Value
  Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1);     // Output chain

  Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                            DAG.getIntPtrConstant(0, true), SDValue());

  Results.push_back(Tmp1);
  Results.push_back(Tmp2);
}
/// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
/// condition code CC on the current target. This routine assumes LHS and RHS
/// have already been legalized by LegalizeSetCCOperands. It expands SETCC with
/// illegal condition code into AND / OR of multiple SETCC values.
void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT,
                                                 SDValue &LHS, SDValue &RHS,
                                                 SDValue &CC,
                                                 DebugLoc dl) {
  EVT OpVT = LHS.getValueType();
  ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get();
  switch (TLI.getCondCodeAction(CCCode, OpVT)) {
  default: llvm_unreachable("Unknown condition code action!");
  case TargetLowering::Legal:
    // Nothing to do.
    break;
  case TargetLowering::Expand: {
    ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID;
    unsigned Opc = 0;
    switch (CCCode) {
    default: llvm_unreachable("Don't know how to expand this condition!");
    case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO;  Opc = ISD::AND; break;
    case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR;  break;
    // FIXME: Implement more expansions.
    }

    SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1);
    SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2);
    LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2);
    RHS = SDValue();
    CC  = SDValue();
    break;
  }
  }
}
/// EmitStackConvert - Emit a store/load combination to the stack. This stores
/// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
/// a load from the stack slot to DestVT, extending it if needed.
/// The resultant code need not be legal.
SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
                                               EVT SlotVT,
                                               EVT DestVT,
                                               DebugLoc dl) {
  // Create the stack frame object.
  unsigned SrcAlign =
    TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType().
                                              getTypeForEVT(*DAG.getContext()));
  SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);

  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
  int SPFI = StackPtrFI->getIndex();
  const Value *SV = PseudoSourceValue::getFixedStack(SPFI);

  unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
  unsigned SlotSize = SlotVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();
  unsigned DestAlign =
    TLI.getTargetData()->getPrefTypeAlignment(DestVT.getTypeForEVT(*DAG.getContext()));

  // Emit a store to the stack slot.  Use a truncstore if the input value is
  // larger than DestVT.
  SDValue Store;
  if (SrcSize > SlotSize)
    Store = DAG.getTruncStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
                              SV, 0, SlotVT, false, SrcAlign);
  else {
    assert(SrcSize == SlotSize && "Invalid store");
    Store = DAG.getStore(DAG.getEntryNode(), dl, SrcOp, FIPtr,
                         SV, 0, false, SrcAlign);
  }

  // Result is a load from the stack slot.
  if (SlotSize == DestSize)
    return DAG.getLoad(DestVT, dl, Store, FIPtr, SV, 0, false, DestAlign);

  assert(SlotSize < DestSize && "Unknown extension!");
  return DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT, Store, FIPtr, SV, 0, SlotVT,
                        false, DestAlign);
}
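
/// ExpandSCALAR_TO_VECTOR - Lower SCALAR_TO_VECTOR through a truncating store
/// to, and a full-vector load from, a vector-sized stack slot.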
SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) {
  DebugLoc dl = Node->getDebugLoc();
  // Create a vector sized/aligned stack slot, store the value to element #0,
  // then load the whole vector back out.
  SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));

  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
  int SPFI = StackPtrFI->getIndex();

  SDValue Ch = DAG.getTruncStore(DAG.getEntryNode(), dl, Node->getOperand(0),
                                 StackPtr,
                                 PseudoSourceValue::getFixedStack(SPFI), 0,
                                 Node->getValueType(0).getVectorElementType());
  return DAG.getLoad(Node->getValueType(0), dl, Ch, StackPtr,
                     PseudoSourceValue::getFixedStack(SPFI), 0);
}
/// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
/// support the operation, but do support the resultant vector type.
SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
  unsigned NumElems = Node->getNumOperands();
  SDValue Value1, Value2;
  DebugLoc dl = Node->getDebugLoc();
  EVT VT = Node->getValueType(0);
  EVT OpVT = Node->getOperand(0).getValueType();
  EVT EltVT = VT.getVectorElementType();

  // If the only non-undef value is the low element, turn this into a
  // SCALAR_TO_VECTOR node.  If this is { X, X, X, X }, determine X.
  bool isOnlyLowElement = true;
  bool MoreThanTwoValues = false;
  bool isConstant = true;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue V = Node->getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    if (!Value1.getNode()) {
      Value1 = V;
    } else if (!Value2.getNode()) {
      if (V != Value1)
        Value2 = V;
    } else if (V != Value1 && V != Value2) {
      MoreThanTwoValues = true;
    }
  }

  if (!Value1.getNode())
    return DAG.getUNDEF(VT);

  if (isOnlyLowElement)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Node->getOperand(0));

  // If all elements are constants, create a load from the constant pool.
  if (isConstant) {
    std::vector<Constant*> CV;
    for (unsigned i = 0, e = NumElems; i != e; ++i) {
      if (ConstantFPSDNode *V =
          dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
        CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
      } else if (ConstantSDNode *V =
                 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
        CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
      } else {
        assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
        const Type *OpNTy = OpVT.getTypeForEVT(*DAG.getContext());
        CV.push_back(UndefValue::get(OpNTy));
      }
    }
    Constant *CP = ConstantVector::get(CV);
    SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy());
    unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
    return DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                       PseudoSourceValue::getConstantPool(), 0,
                       false, Alignment);
  }

  if (!MoreThanTwoValues) {
    SmallVector<int, 8> ShuffleVec(NumElems, -1);
    for (unsigned i = 0; i < NumElems; ++i) {
      SDValue V = Node->getOperand(i);
      if (V.getOpcode() == ISD::UNDEF)
        continue;
      ShuffleVec[i] = V == Value1 ? 0 : NumElems;
    }
    if (TLI.isShuffleMaskLegal(ShuffleVec, Node->getValueType(0))) {
      // Get the splatted value into the low element of a vector register.
      SDValue Vec1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value1);
      SDValue Vec2;
      if (Value2.getNode())
        Vec2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value2);
      else
        Vec2 = DAG.getUNDEF(VT);

      // Return shuffle(LowValVec, undef, <0,0,0,0>)
      return DAG.getVectorShuffle(VT, dl, Vec1, Vec2, ShuffleVec.data());
    }
  }

  // Otherwise, we can't handle this case efficiently.
  return ExpandVectorBuildThroughStack(Node);
}
// ExpandLibCall - Expand a node into a call to a libcall.  If the result value
// does not fit into a register, return the lo part and set the hi part to the
// by-reg argument.  If it does fit into a single register, return the result
// and leave the Hi part unset.
SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
                                            bool isSigned) {
  assert(!IsLegalizingCall && "Cannot overlap legalization of calls!");
  // The input chain to this libcall is the entry node of the function.
  // Legalizing the call will automatically add the previous call to the
  // dependence.
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Node->getOperand(i).getValueType();
    const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
                                         TLI.getPointerTy());

  // Splice the libcall in wherever FindInputOutputChains tells us to.
  const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
  std::pair<SDValue, SDValue> CallInfo =
    TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
                    0, TLI.getLibcallCallingConv(LC), false,
                    /*isReturnValueUsed=*/true,
                    Callee, Args, DAG,
                    Node->getDebugLoc());

  // Legalize the call sequence, starting with the chain.  This will advance
  // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
  // was added by LowerCallTo (guaranteeing proper serialization of calls).
  LegalizeOp(CallInfo.second);
  return CallInfo.first;
}
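
/// ExpandFPLibCall - Expand the node into a libcall to the floating-point
/// routine that matches its value type (f32/f64/f80/ppcf128).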
SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
                                              RTLIB::Libcall Call_F32,
                                              RTLIB::Libcall Call_F64,
                                              RTLIB::Libcall Call_F80,
                                              RTLIB::Libcall Call_PPCF128) {
  RTLIB::Libcall LC;
  switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::f32: LC = Call_F32; break;
  case MVT::f64: LC = Call_F64; break;
  case MVT::f80: LC = Call_F80; break;
  case MVT::ppcf128: LC = Call_PPCF128; break;
  }
  return ExpandLibCall(LC, Node, false);
}

SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
                                               RTLIB::Libcall Call_I16,
                                               RTLIB::Libcall Call_I32,
                                               RTLIB::Libcall Call_I64,
                                               RTLIB::Libcall Call_I128) {
  RTLIB::Libcall LC;
  switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i16: LC = Call_I16; break;
  case MVT::i32: LC = Call_I32; break;
  case MVT::i64: LC = Call_I64; break;
  case MVT::i128: LC = Call_I128; break;
  }
  return ExpandLibCall(LC, Node, isSigned);
}
/// ExpandLegalINT_TO_FP - This function is responsible for legalizing a
/// INT_TO_FP operation of the specified operand when the target requests that
/// we expand it.  At this point, we know that the result and operand types are
/// legal for the target.
SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
                                                   SDValue Op0,
                                                   EVT DestVT,
                                                   DebugLoc dl) {
  if (Op0.getValueType() == MVT::i32) {
    // simple 32-bit [signed|unsigned] integer to float/double expansion

    // Get the stack frame index of a 8 byte buffer.
    SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);

    // word offset constant for Hi/Lo address computation
    SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy());
    // set up Hi and Lo (into buffer) address based on endian
    SDValue Hi = StackSlot;
    SDValue Lo = DAG.getNode(ISD::ADD, dl,
                             TLI.getPointerTy(), StackSlot, WordOff);
    if (TLI.isLittleEndian())
      std::swap(Hi, Lo);

    // if signed map to unsigned space
    SDValue Op0Mapped;
    if (isSigned) {
      // constant used to invert sign bit (signed to unsigned mapping)
      SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32);
      Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
    } else {
      Op0Mapped = Op0;
    }
    // store the lo of the constructed double - based on integer input
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
                                  Op0Mapped, Lo, NULL, 0);
    // initial hi portion of constructed double
    SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
    // store the hi of the constructed double - biased exponent
    SDValue Store2 = DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0);
    // load the constructed double
    SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0);
    // FP constant to bias correct the final result
    SDValue Bias = DAG.getConstantFP(isSigned ?
                                     BitsToDouble(0x4330000080000000ULL) :
                                     BitsToDouble(0x4330000000000000ULL),
                                     MVT::f64);
    // subtract the bias
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
    // final result
    SDValue Result;
    // handle final rounding
    if (DestVT == MVT::f64) {
      // do nothing
      Result = Sub;
    } else if (DestVT.bitsLT(MVT::f64)) {
      Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                           DAG.getIntPtrConstant(0));
    } else if (DestVT.bitsGT(MVT::f64)) {
      Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
    }
    return Result;
  }
  assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet");
  SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, dl, DestVT, Op0);

  SDValue SignSet = DAG.getSetCC(dl, TLI.getSetCCResultType(Op0.getValueType()),
                                 Op0, DAG.getConstant(0, Op0.getValueType()),
                                 ISD::SETLT);
  SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4);
  SDValue CstOffset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(),
                                  SignSet, Four, Zero);

  // If the sign bit of the integer is set, the large number will be treated
  // as a negative number.  To counteract this, the dynamic code adds an
  // offset depending on the data type.
  uint64_t FF = 0;
  switch (Op0.getValueType().getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unsupported integer type!");
  case MVT::i8 : FF = 0x43800000ULL; break;  // 2^8  (as a float)
  case MVT::i16: FF = 0x47800000ULL; break;  // 2^16 (as a float)
  case MVT::i32: FF = 0x4F800000ULL; break;  // 2^32 (as a float)
  case MVT::i64: FF = 0x5F800000ULL; break;  // 2^64 (as a float)
  }
  if (TLI.isLittleEndian()) FF <<= 32;
  Constant *FudgeFactor = ConstantInt::get(
                                  Type::getInt64Ty(*DAG.getContext()), FF);

  SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset);
  Alignment = std::min(Alignment, 4u);
  SDValue FudgeInReg;
  if (DestVT == MVT::f32)
    FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
                             PseudoSourceValue::getConstantPool(), 0,
                             false, Alignment);
  else {
    FudgeInReg =
      LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
                                DAG.getEntryNode(), CPIdx,
                                PseudoSourceValue::getConstantPool(), 0,
                                MVT::f32, false, Alignment));
  }

  return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
}
/// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
/// *INT_TO_FP operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
/// operation that takes a larger input.
SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
                                                    EVT DestVT,
                                                    bool isSigned,
                                                    DebugLoc dl) {
  // First step, figure out the appropriate *INT_TO_FP operation to use.
  EVT NewInTy = LegalOp.getValueType();

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (1) {
    NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
    assert(NewInTy.isInteger() && "Ran out of possibilities!");

    // If the target supports SINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, NewInTy)) {
      OpToUse = ISD::SINT_TO_FP;
      break;
    }
    if (isSigned) continue;

    // If the target supports UINT_TO_FP of this type, use it.
    if (TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, NewInTy)) {
      OpToUse = ISD::UINT_TO_FP;
      break;
    }

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.  Zero extend our input to the
  // desired type then run the operation on it.
  return DAG.getNode(OpToUse, dl, DestVT,
                     DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                                 dl, NewInTy, LegalOp));
}
/// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
/// FP_TO_*INT operation of the specified operand when the target requests that
/// we promote it.  At this point, we know that the result and operand types are
/// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
/// operation that returns a larger result.
SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
                                                    EVT DestVT,
                                                    bool isSigned,
                                                    DebugLoc dl) {
  // First step, figure out the appropriate FP_TO*INT operation to use.
  EVT NewOutTy = DestVT;

  unsigned OpToUse = 0;

  // Scan for the appropriate larger type to use.
  while (1) {
    NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
    assert(NewOutTy.isInteger() && "Ran out of possibilities!");

    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
      OpToUse = ISD::FP_TO_SINT;
      break;
    }

    if (TLI.isOperationLegalOrCustom(ISD::FP_TO_UINT, NewOutTy)) {
      OpToUse = ISD::FP_TO_UINT;
      break;
    }

    // Otherwise, try a larger type.
  }

  // Okay, we found the operation and type to use.
  SDValue Operation = DAG.getNode(OpToUse, dl, NewOutTy, LegalOp);

  // Truncate the result of the extended FP_TO_*INT operation to the desired
  // size.
  return DAG.getNode(ISD::TRUNCATE, dl, DestVT, Operation);
}
/// ExpandBSWAP - Open code the operations for BSWAP of the specified operand.
SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
  EVT VT = Op.getValueType();
  EVT SHVT = TLI.getShiftAmountTy();
  SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unhandled Expand type in BSWAP!");
  case MVT::i16:
    Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  case MVT::i32:
    Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(0xFF0000, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(0xFF00, VT));
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
  case MVT::i64:
    Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT));
    Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT));
    Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp5 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp4 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
    Tmp3 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(24, SHVT));
    Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(40, SHVT));
    Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(56, SHVT));
    Tmp7 = DAG.getNode(ISD::AND, dl, VT, Tmp7, DAG.getConstant(255ULL<<48, VT));
    Tmp6 = DAG.getNode(ISD::AND, dl, VT, Tmp6, DAG.getConstant(255ULL<<40, VT));
    Tmp5 = DAG.getNode(ISD::AND, dl, VT, Tmp5, DAG.getConstant(255ULL<<32, VT));
    Tmp4 = DAG.getNode(ISD::AND, dl, VT, Tmp4, DAG.getConstant(255ULL<<24, VT));
    Tmp3 = DAG.getNode(ISD::AND, dl, VT, Tmp3, DAG.getConstant(255ULL<<16, VT));
    Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(255ULL<<8 , VT));
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp7);
    Tmp6 = DAG.getNode(ISD::OR, dl, VT, Tmp6, Tmp5);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
    Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
    Tmp8 = DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp6);
    Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
    return DAG.getNode(ISD::OR, dl, VT, Tmp8, Tmp4);
  }
}
/// ExpandBitCount - Expand the specified bitcount instruction into operations.
SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op,
                                             DebugLoc dl) {
  switch (Opc) {
  default: llvm_unreachable("Cannot expand this yet!");
  case ISD::CTPOP: {
    static const uint64_t mask[6] = {
      0x5555555555555555ULL, 0x3333333333333333ULL,
      0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
      0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL
    };
    EVT VT = Op.getValueType();
    EVT ShVT = TLI.getShiftAmountTy();
    unsigned len = VT.getSizeInBits();
    for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
      // x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
      unsigned EltSize = VT.isVector() ?
        VT.getVectorElementType().getSizeInBits() : len;
      SDValue Tmp2 = DAG.getConstant(APInt(EltSize, mask[i]), VT);
      SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
      Op = DAG.getNode(ISD::ADD, dl, VT,
                       DAG.getNode(ISD::AND, dl, VT, Op, Tmp2),
                       DAG.getNode(ISD::AND, dl, VT,
                                   DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3),
                                   Tmp2));
    }
    return Op;
  }
  case ISD::CTLZ: {
    // for now, we do this:
    // x = x | (x >> 1);
    // x = x | (x >> 2);
    // ...
    // x = x | (x >>16);
    // x = x | (x >>32); // for 64-bit input
    // return popcount(~x);
    //
    // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
    EVT VT = Op.getValueType();
    EVT ShVT = TLI.getShiftAmountTy();
    unsigned len = VT.getSizeInBits();
    for (unsigned i = 0; (1U << i) <= (len / 2); ++i) {
      SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT);
      Op = DAG.getNode(ISD::OR, dl, VT, Op,
                       DAG.getNode(ISD::SRL, dl, VT, Op, Tmp3));
    }
    Op = DAG.getNOT(dl, Op, VT);
    return DAG.getNode(ISD::CTPOP, dl, VT, Op);
  }
  case ISD::CTTZ: {
    // for now, we use: { return popcount(~x & (x - 1)); }
    // unless the target has ctlz but not ctpop, in which case we use:
    // { return 32 - nlz(~x & (x-1)); }
    // see also http://www.hackersdelight.org/HDcode/ntz.cc
    EVT VT = Op.getValueType();
    SDValue Tmp3 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getNOT(dl, Op, VT),
                               DAG.getNode(ISD::SUB, dl, VT, Op,
                                           DAG.getConstant(1, VT)));
    // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
    if (!TLI.isOperationLegalOrCustom(ISD::CTPOP, VT) &&
        TLI.isOperationLegalOrCustom(ISD::CTLZ, VT))
      return DAG.getNode(ISD::SUB, dl, VT,
                         DAG.getConstant(VT.getSizeInBits(), VT),
                         DAG.getNode(ISD::CTLZ, dl, VT, Tmp3));
    return DAG.getNode(ISD::CTPOP, dl, VT, Tmp3);
  }
  }
}
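
/// ExpandNode - Expand a node that the target does not natively support into
/// a sequence of legal operations or a libcall, pushing the replacement
/// values onto Results.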
void SelectionDAGLegalize::ExpandNode(SDNode *Node,
                                      SmallVectorImpl<SDValue> &Results) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  switch (Node->getOpcode()) {
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
    break;
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
  case ISD::FRAME_TO_ARGS_OFFSET:
    Results.push_back(DAG.getConstant(0, Node->getValueType(0)));
    break;
  case ISD::FLT_ROUNDS_:
    Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
    break;
  case ISD::EH_RETURN:
  case ISD::DBG_LABEL:
  case ISD::EH_LABEL:
  case ISD::PREFETCH:
  case ISD::MEMBARRIER:
  case ISD::VAEND:
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::DBG_STOPPOINT:
    Results.push_back(ExpandDBG_STOPPOINT(Node));
    break;
  case ISD::DYNAMIC_STACKALLOC:
    ExpandDYNAMIC_STACKALLOC(Node, Results);
    break;
  case ISD::MERGE_VALUES:
    for (unsigned i = 0; i < Node->getNumValues(); i++)
      Results.push_back(Node->getOperand(i));
    break;
  case ISD::UNDEF: {
    EVT VT = Node->getValueType(0);
    if (VT.isInteger())
      Results.push_back(DAG.getConstant(0, VT));
    else if (VT.isFloatingPoint())
      Results.push_back(DAG.getConstantFP(0, VT));
    else
      llvm_unreachable("Unknown value type!");
    break;
  }
  case ISD::TRAP: {
    // If this operation is not supported, lower it to 'abort()' call
    TargetLowering::ArgListTy Args;
    std::pair<SDValue, SDValue> CallResult =
      TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
                      false, false, false, false, 0, CallingConv::C, false,
                      /*isReturnValueUsed=*/true,
                      DAG.getExternalSymbol("abort", TLI.getPointerTy()),
                      Args, DAG, dl);
    Results.push_back(CallResult.second);
    break;
  }
  case ISD::FP_ROUND:
  case ISD::BIT_CONVERT:
    Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_EXTEND:
    Tmp1 = EmitStackConvert(Node->getOperand(0),
                            Node->getOperand(0).getValueType(),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::SIGN_EXTEND_INREG: {
    // NOTE: we could fall back on load/store here too for targets without
    // SAR.  However, it is doubtful that any exist.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    unsigned BitsDiff = Node->getValueType(0).getSizeInBits() -
                        ExtraVT.getSizeInBits();
    SDValue ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy());
    Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
                       Node->getOperand(0), ShiftCst);
    Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FP_ROUND_INREG: {
    // The only way we can lower this is to turn it into a TRUNCSTORE,
    // EXTLOAD pair, targeting a temporary location (a stack slot).
    //
    // NOTE: there is a choice here between constantly creating new stack
    // slots and always reusing the same one.  We currently always create
    // new ones, as reuse may inhibit scheduling.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
                                Node->getOperand(0), Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_TO_UINT: {
    SDValue True, False;
    EVT VT =  Node->getOperand(0).getValueType();
    EVT NVT = Node->getValueType(0);
    const uint64_t zero[] = {0, 0};
    APFloat apf = APFloat(APInt(VT.getSizeInBits(), 2, zero));
    APInt x = APInt::getSignBit(NVT.getSizeInBits());
    (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
    Tmp1 = DAG.getConstantFP(apf, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
                        Node->getOperand(0),
                        Tmp1, ISD::SETLT);
    True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
    False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
                        DAG.getNode(ISD::FSUB, dl, VT,
                                    Node->getOperand(0), Tmp1));
    False = DAG.getNode(ISD::XOR, dl, NVT, False,
                        DAG.getConstant(x, NVT));
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VAARG: {
    const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    SDValue VAList = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2, V, 0);
    // Increment the pointer, VAList, to the next vaarg
    Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                       DAG.getConstant(TLI.getTargetData()->
                         getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                       TLI.getPointerTy()));
    // Store the incremented VAList to the legalized pointer
    Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
    // Load the actual argument out of the pointer VAList
    Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::VACOPY: {
    // This defaults to loading a pointer from the input and storing it to the
    // output, returning the chain.
    const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
    const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
    Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
                       Node->getOperand(2), VS, 0);
    Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1), VD, 0);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
      // This must be an access of the only element.  Return it.
      Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
                         Node->getOperand(0));
    else
      Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
    Results.push_back(Tmp1);
    break;
  case ISD::EXTRACT_SUBVECTOR:
    Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::CONCAT_VECTORS: {
    Results.push_back(ExpandVectorBuildThroughStack(Node));
    break;
  }
  case ISD::SCALAR_TO_VECTOR:
    Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
    break;
  case ISD::INSERT_VECTOR_ELT:
    Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
                                              Node->getOperand(1),
                                              Node->getOperand(2), dl));
    break;
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
    unsigned NumElems = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Mask[i] < 0) {
        Ops.push_back(DAG.getUNDEF(EltVT));
        continue;
      }
      unsigned Idx = Mask[i];
      if (Idx < NumElems)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(0),
                                  DAG.getIntPtrConstant(Idx)));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(1),
                                  DAG.getIntPtrConstant(Idx - NumElems)));
    }
    Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    EVT OpTy = Node->getOperand(0).getValueType();
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
      // 1 -> Hi
      Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
                         DAG.getConstant(OpTy.getSizeInBits()/2,
                                         TLI.getShiftAmountTy()));
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
    } else {
      // 0 -> Lo
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
                         Node->getOperand(0));
    }
    Results.push_back(Tmp1);
    break;
  }
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) ->  Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    bool isLegal = false;
    for (TargetLowering::legal_fpimm_iterator I = TLI.legal_fpimm_begin(),
           E = TLI.legal_fpimm_end(); I != E; ++I) {
      if (CFP->isExactlyValue(*I)) {
        isLegal = true;
        break;
      }
    }
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (isLegal)
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT)) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X-X/Y*Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned) {
      Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    } else {
      Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    }
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SDIV:
  case ISD::UDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true, RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false, RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms. If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false, RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    //   LHSSign -> LHS >= 0
    //   RHSSign -> RHS >= 0
    //   SumSign -> Sum >= 0
    //
    //   Add:
    //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    //   Sub:
    //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode () == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // FIXME: We should be able to fall back to a libcall with an illegal
      // type in some cases.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      llvm_unreachable("Don't know how to expand this operation yet!");
    }
    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy()));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize();
    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                PseudoSourceValue::getJumpTable(), 0, MemVT);
    Addr = LD;
    if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp2,
                         DAG.getConstant(0, Tmp2.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);              // Chain
    Tmp2 = Node->getOperand(2);              // LHS
    Tmp3 = Node->getOperand(3);              // RHS
    Tmp4 = Node->getOperand(1);              // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);
    LastCALLSEQ_END = DAG.getEntryNode();

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}
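
/// PromoteNode - Promote a node to a larger type chosen by getTypeToPromoteTo,
/// perform the operation there, and convert the result back to the original
/// type.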
2945 void SelectionDAGLegalize::PromoteNode(SDNode
*Node
,
2946 SmallVectorImpl
<SDValue
> &Results
) {
2947 EVT OVT
= Node
->getValueType(0);
2948 if (Node
->getOpcode() == ISD::UINT_TO_FP
||
2949 Node
->getOpcode() == ISD::SINT_TO_FP
||
2950 Node
->getOpcode() == ISD::SETCC
) {
2951 OVT
= Node
->getOperand(0).getValueType();
2953 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), OVT
);
2954 DebugLoc dl
= Node
->getDebugLoc();
2955 SDValue Tmp1
, Tmp2
, Tmp3
;
2956 switch (Node
->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // If Tmp1 == sizeinbits(NVT), then Tmp1 = sizeinbits(Old VT).
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
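  // BSWAP is promoted by swapping in the wider type and then shifting the
  // interesting bytes back down.  E.g. (hypothetically) promoting i16 to
  // i32: bswap32(zext16(x)) leaves the two original bytes in the top half of
  // the i32, so a logical shift right by DiffBits = 16 realigns them.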
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy()));
    Results.push_back(Tmp1);
    break;
  }
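  // FP <-> integer conversions are promoted via the PromoteLegalFP_TO_INT /
  // PromoteLegalINT_TO_FP helpers, passing the operand, the desired result
  // type, and whether the conversion is signed.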
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
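  // Bitwise logic only combines corresponding bits, so the value of the
  // extra high bits never matters: ANY_EXTEND (or a plain bitcast for
  // vectors) is sufficient, and the result is truncated (or cast) back.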
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (OVT.isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      llvm_report_error("Cannot promote logic operation");
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
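  // For SELECT only the two selected values are promoted; the condition
  // operand is passed through untouched.  FP results are narrowed back with
  // FP_ROUND rather than TRUNCATE; integer and vector results use TRUNCATE
  // or BIT_CONVERT as chosen above.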
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BIT_CONVERT;
      TruncOp = ISD::BIT_CONVERT;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
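  // The shuffle is redone in the promoted vector type: bitcast both inputs,
  // widen the mask to the new number of elements (ShuffleWithNarrowerEltType),
  // and bitcast the shuffled result back to the original type.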
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
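  // Integer comparisons must be widened with an extension that preserves
  // their ordering: sign-extend for signed condition codes, zero-extend for
  // unsigned and equality codes.  FP operands are simply extended with
  // FP_EXTEND.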
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}
// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize(bool TypesNeedLegalizing,
                            CodeGenOpt::Level OptLevel) {
  // Construct a SelectionDAGLegalize for this DAG and run it to completion.
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();
}