//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "llvm/CodeGen/MachineFunction.h"
16 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineJumpTableInfo.h"
18 #include "llvm/CodeGen/MachineModuleInfo.h"
19 #include "llvm/Analysis/DebugInfo.h"
20 #include "llvm/CodeGen/PseudoSourceValue.h"
21 #include "llvm/Target/TargetFrameInfo.h"
22 #include "llvm/Target/TargetLowering.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include "llvm/Target/TargetOptions.h"
26 #include "llvm/CallingConv.h"
27 #include "llvm/Constants.h"
28 #include "llvm/DerivedTypes.h"
29 #include "llvm/Function.h"
30 #include "llvm/GlobalVariable.h"
31 #include "llvm/LLVMContext.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/ADT/DenseMap.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/ADT/SmallPtrSet.h"
42 //===----------------------------------------------------------------------===//
43 /// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
44 /// hacks on it until the target machine can handle it. This involves
45 /// eliminating value sizes the machine cannot handle (promoting small sizes to
46 /// large sizes or splitting up large values into small values) as well as
47 /// eliminating operations the machine cannot handle.
49 /// This code also does a small amount of optimization and recognition of idioms
50 /// as part of its processing. For example, if a target does not support a
51 /// 'setcc' instruction efficiently, but does support 'brcc' instruction, this
52 /// will attempt merge setcc and brc instructions into brcc's.
55 class SelectionDAGLegalize
{
56 const TargetMachine
&TM
;
57 const TargetLowering
&TLI
;
59 CodeGenOpt::Level OptLevel
;
61 // Libcall insertion helpers.
63 /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
64 /// legalized. We use this to ensure that calls are properly serialized
65 /// against each other, including inserted libcalls.
66 SDValue LastCALLSEQ_END
;
68 /// IsLegalizingCall - This member is used *only* for purposes of providing
69 /// helpful assertions that a libcall isn't created while another call is
70 /// being legalized (which could lead to non-serialized call sequences).
71 bool IsLegalizingCall
;
74 Legal
, // The target natively supports this operation.
75 Promote
, // This operation should be executed in a larger type.
76 Expand
// Try to expand this to other ops, otherwise use a libcall.
79 /// ValueTypeActions - This is a bitvector that contains two bits for each
80 /// value type, where the two bits correspond to the LegalizeAction enum.
81 /// This can be queried with "getTypeAction(VT)".
82 TargetLowering::ValueTypeActionImpl ValueTypeActions
;
84 /// LegalizedNodes - For nodes that are of legal width, and that have more
85 /// than one use, this map indicates what regularized operand to use. This
86 /// allows us to avoid legalizing the same thing more than once.
87 DenseMap
<SDValue
, SDValue
> LegalizedNodes
;
89 void AddLegalizedOperand(SDValue From
, SDValue To
) {
90 LegalizedNodes
.insert(std::make_pair(From
, To
));
91 // If someone requests legalization of the new node, return itself.
93 LegalizedNodes
.insert(std::make_pair(To
, To
));
97 SelectionDAGLegalize(SelectionDAG
&DAG
, CodeGenOpt::Level ol
);
99 /// getTypeAction - Return how we should legalize values of this type, either
100 /// it is already legal or we need to expand it into multiple registers of
101 /// smaller integer type, or we need to promote it to a larger type.
102 LegalizeAction
getTypeAction(EVT VT
) const {
103 return (LegalizeAction
)ValueTypeActions
.getTypeAction(VT
);
106 /// isTypeLegal - Return true if this type is legal on this target.
108 bool isTypeLegal(EVT VT
) const {
109 return getTypeAction(VT
) == Legal
;
115 /// LegalizeOp - We know that the specified value has a legal type.
116 /// Recursively ensure that the operands have legal types, then return the
118 SDValue
LegalizeOp(SDValue O
);
120 SDValue
OptimizeFloatStore(StoreSDNode
*ST
);
122 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable
123 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
124 /// is necessary to spill the vector being inserted into to memory, perform
125 /// the insert there, and then read the result back.
126 SDValue
PerformInsertVectorEltInMemory(SDValue Vec
, SDValue Val
,
127 SDValue Idx
, DebugLoc dl
);
128 SDValue
ExpandINSERT_VECTOR_ELT(SDValue Vec
, SDValue Val
,
129 SDValue Idx
, DebugLoc dl
);
131 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
132 /// performs the same shuffe in terms of order or result bytes, but on a type
133 /// whose vector element type is narrower than the original shuffle type.
134 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
135 SDValue
ShuffleWithNarrowerEltType(EVT NVT
, EVT VT
, DebugLoc dl
,
136 SDValue N1
, SDValue N2
,
137 SmallVectorImpl
<int> &Mask
) const;
139 bool LegalizeAllNodesNotLeadingTo(SDNode
*N
, SDNode
*Dest
,
140 SmallPtrSet
<SDNode
*, 32> &NodesLeadingTo
);
142 void LegalizeSetCCCondCode(EVT VT
, SDValue
&LHS
, SDValue
&RHS
, SDValue
&CC
,
145 SDValue
ExpandLibCall(RTLIB::Libcall LC
, SDNode
*Node
, bool isSigned
);
146 std::pair
<SDValue
, SDValue
> ExpandChainLibCall(RTLIB::Libcall LC
,
147 SDNode
*Node
, bool isSigned
);
148 SDValue
ExpandFPLibCall(SDNode
*Node
, RTLIB::Libcall Call_F32
,
149 RTLIB::Libcall Call_F64
, RTLIB::Libcall Call_F80
,
150 RTLIB::Libcall Call_PPCF128
);
151 SDValue
ExpandIntLibCall(SDNode
*Node
, bool isSigned
,
152 RTLIB::Libcall Call_I8
,
153 RTLIB::Libcall Call_I16
,
154 RTLIB::Libcall Call_I32
,
155 RTLIB::Libcall Call_I64
,
156 RTLIB::Libcall Call_I128
);
158 SDValue
EmitStackConvert(SDValue SrcOp
, EVT SlotVT
, EVT DestVT
, DebugLoc dl
);
159 SDValue
ExpandBUILD_VECTOR(SDNode
*Node
);
160 SDValue
ExpandSCALAR_TO_VECTOR(SDNode
*Node
);
161 void ExpandDYNAMIC_STACKALLOC(SDNode
*Node
,
162 SmallVectorImpl
<SDValue
> &Results
);
163 SDValue
ExpandFCOPYSIGN(SDNode
*Node
);
164 SDValue
ExpandLegalINT_TO_FP(bool isSigned
, SDValue LegalOp
, EVT DestVT
,
166 SDValue
PromoteLegalINT_TO_FP(SDValue LegalOp
, EVT DestVT
, bool isSigned
,
168 SDValue
PromoteLegalFP_TO_INT(SDValue LegalOp
, EVT DestVT
, bool isSigned
,
171 SDValue
ExpandBSWAP(SDValue Op
, DebugLoc dl
);
172 SDValue
ExpandBitCount(unsigned Opc
, SDValue Op
, DebugLoc dl
);
174 SDValue
ExpandExtractFromVectorThroughStack(SDValue Op
);
175 SDValue
ExpandVectorBuildThroughStack(SDNode
* Node
);
177 std::pair
<SDValue
, SDValue
> ExpandAtomic(SDNode
*Node
);
179 void ExpandNode(SDNode
*Node
, SmallVectorImpl
<SDValue
> &Results
);
180 void PromoteNode(SDNode
*Node
, SmallVectorImpl
<SDValue
> &Results
);
184 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
185 /// performs the same shuffe in terms of order or result bytes, but on a type
186 /// whose vector element type is narrower than the original shuffle type.
187 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
189 SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT
, EVT VT
, DebugLoc dl
,
190 SDValue N1
, SDValue N2
,
191 SmallVectorImpl
<int> &Mask
) const {
192 unsigned NumMaskElts
= VT
.getVectorNumElements();
193 unsigned NumDestElts
= NVT
.getVectorNumElements();
194 unsigned NumEltsGrowth
= NumDestElts
/ NumMaskElts
;
196 assert(NumEltsGrowth
&& "Cannot promote to vector type with fewer elts!");
198 if (NumEltsGrowth
== 1)
199 return DAG
.getVectorShuffle(NVT
, dl
, N1
, N2
, &Mask
[0]);
201 SmallVector
<int, 8> NewMask
;
202 for (unsigned i
= 0; i
!= NumMaskElts
; ++i
) {
204 for (unsigned j
= 0; j
!= NumEltsGrowth
; ++j
) {
206 NewMask
.push_back(-1);
208 NewMask
.push_back(Idx
* NumEltsGrowth
+ j
);
211 assert(NewMask
.size() == NumDestElts
&& "Non-integer NumEltsGrowth?");
212 assert(TLI
.isShuffleMaskLegal(NewMask
, NVT
) && "Shuffle not legal?");
213 return DAG
.getVectorShuffle(NVT
, dl
, N1
, N2
, &NewMask
[0]);
216 SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG
&dag
,
217 CodeGenOpt::Level ol
)
218 : TM(dag
.getTarget()), TLI(dag
.getTargetLoweringInfo()),
219 DAG(dag
), OptLevel(ol
),
220 ValueTypeActions(TLI
.getValueTypeActions()) {
221 assert(MVT::LAST_VALUETYPE
<= MVT::MAX_ALLOWED_VALUETYPE
&&
222 "Too many value types for ValueTypeActions to hold!");
225 void SelectionDAGLegalize::LegalizeDAG() {
226 LastCALLSEQ_END
= DAG
.getEntryNode();
227 IsLegalizingCall
= false;
229 // The legalize process is inherently a bottom-up recursive process (users
230 // legalize their uses before themselves). Given infinite stack space, we
231 // could just start legalizing on the root and traverse the whole graph. In
232 // practice however, this causes us to run out of stack space on large basic
233 // blocks. To avoid this problem, compute an ordering of the nodes where each
234 // node is only legalized after all of its operands are legalized.
235 DAG
.AssignTopologicalOrder();
236 for (SelectionDAG::allnodes_iterator I
= DAG
.allnodes_begin(),
237 E
= prior(DAG
.allnodes_end()); I
!= llvm::next(E
); ++I
)
238 LegalizeOp(SDValue(I
, 0));
240 // Finally, it's possible the root changed. Get the new root.
241 SDValue OldRoot
= DAG
.getRoot();
242 assert(LegalizedNodes
.count(OldRoot
) && "Root didn't get legalized?");
243 DAG
.setRoot(LegalizedNodes
[OldRoot
]);
245 LegalizedNodes
.clear();
247 // Remove dead nodes now.
248 DAG
.RemoveDeadNodes();
252 /// FindCallEndFromCallStart - Given a chained node that is part of a call
253 /// sequence, find the CALLSEQ_END node that terminates the call sequence.
254 static SDNode
*FindCallEndFromCallStart(SDNode
*Node
) {
255 if (Node
->getOpcode() == ISD::CALLSEQ_END
)
257 if (Node
->use_empty())
258 return 0; // No CallSeqEnd
260 // The chain is usually at the end.
261 SDValue
TheChain(Node
, Node
->getNumValues()-1);
262 if (TheChain
.getValueType() != MVT::Other
) {
263 // Sometimes it's at the beginning.
264 TheChain
= SDValue(Node
, 0);
265 if (TheChain
.getValueType() != MVT::Other
) {
266 // Otherwise, hunt for it.
267 for (unsigned i
= 1, e
= Node
->getNumValues(); i
!= e
; ++i
)
268 if (Node
->getValueType(i
) == MVT::Other
) {
269 TheChain
= SDValue(Node
, i
);
273 // Otherwise, we walked into a node without a chain.
274 if (TheChain
.getValueType() != MVT::Other
)
279 for (SDNode::use_iterator UI
= Node
->use_begin(),
280 E
= Node
->use_end(); UI
!= E
; ++UI
) {
282 // Make sure to only follow users of our token chain.
284 for (unsigned i
= 0, e
= User
->getNumOperands(); i
!= e
; ++i
)
285 if (User
->getOperand(i
) == TheChain
)
286 if (SDNode
*Result
= FindCallEndFromCallStart(User
))
292 /// FindCallStartFromCallEnd - Given a chained node that is part of a call
293 /// sequence, find the CALLSEQ_START node that initiates the call sequence.
294 static SDNode
*FindCallStartFromCallEnd(SDNode
*Node
) {
295 assert(Node
&& "Didn't find callseq_start for a call??");
296 if (Node
->getOpcode() == ISD::CALLSEQ_START
) return Node
;
298 assert(Node
->getOperand(0).getValueType() == MVT::Other
&&
299 "Node doesn't have a token chain argument!");
300 return FindCallStartFromCallEnd(Node
->getOperand(0).getNode());
303 /// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
304 /// see if any uses can reach Dest. If no dest operands can get to dest,
305 /// legalize them, legalize ourself, and return false, otherwise, return true.
307 /// Keep track of the nodes we fine that actually do lead to Dest in
308 /// NodesLeadingTo. This avoids retraversing them exponential number of times.
310 bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode
*N
, SDNode
*Dest
,
311 SmallPtrSet
<SDNode
*, 32> &NodesLeadingTo
) {
312 if (N
== Dest
) return true; // N certainly leads to Dest :)
314 // If we've already processed this node and it does lead to Dest, there is no
315 // need to reprocess it.
316 if (NodesLeadingTo
.count(N
)) return true;
318 // If the first result of this node has been already legalized, then it cannot
320 if (LegalizedNodes
.count(SDValue(N
, 0))) return false;
322 // Okay, this node has not already been legalized. Check and legalize all
323 // operands. If none lead to Dest, then we can legalize this node.
324 bool OperandsLeadToDest
= false;
325 for (unsigned i
= 0, e
= N
->getNumOperands(); i
!= e
; ++i
)
326 OperandsLeadToDest
|= // If an operand leads to Dest, so do we.
327 LegalizeAllNodesNotLeadingTo(N
->getOperand(i
).getNode(), Dest
,
330 if (OperandsLeadToDest
) {
331 NodesLeadingTo
.insert(N
);
335 // Okay, this node looks safe, legalize it and return false.
336 LegalizeOp(SDValue(N
, 0));
340 /// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
341 /// a load from the constant pool.
342 static SDValue
ExpandConstantFP(ConstantFPSDNode
*CFP
, bool UseCP
,
343 SelectionDAG
&DAG
, const TargetLowering
&TLI
) {
345 DebugLoc dl
= CFP
->getDebugLoc();
347 // If a FP immediate is precise when represented as a float and if the
348 // target can do an extending load from float to double, we put it into
349 // the constant pool as a float, even if it's is statically typed as a
350 // double. This shrinks FP constants and canonicalizes them for targets where
351 // an FP extending load is the same cost as a normal load (such as on the x87
352 // fp stack or PPC FP unit).
353 EVT VT
= CFP
->getValueType(0);
354 ConstantFP
*LLVMC
= const_cast<ConstantFP
*>(CFP
->getConstantFPValue());
356 assert((VT
== MVT::f64
|| VT
== MVT::f32
) && "Invalid type expansion");
357 return DAG
.getConstant(LLVMC
->getValueAPF().bitcastToAPInt(),
358 (VT
== MVT::f64
) ? MVT::i64
: MVT::i32
);
363 while (SVT
!= MVT::f32
) {
364 SVT
= (MVT::SimpleValueType
)(SVT
.getSimpleVT().SimpleTy
- 1);
365 if (ConstantFPSDNode::isValueValidForType(SVT
, CFP
->getValueAPF()) &&
366 // Only do this if the target has a native EXTLOAD instruction from
368 TLI
.isLoadExtLegal(ISD::EXTLOAD
, SVT
) &&
369 TLI
.ShouldShrinkFPConstant(OrigVT
)) {
370 const Type
*SType
= SVT
.getTypeForEVT(*DAG
.getContext());
371 LLVMC
= cast
<ConstantFP
>(ConstantExpr::getFPTrunc(LLVMC
, SType
));
377 SDValue CPIdx
= DAG
.getConstantPool(LLVMC
, TLI
.getPointerTy());
378 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
380 return DAG
.getExtLoad(ISD::EXTLOAD
, OrigVT
, dl
,
382 CPIdx
, MachinePointerInfo::getConstantPool(),
383 VT
, false, false, Alignment
);
384 return DAG
.getLoad(OrigVT
, dl
, DAG
.getEntryNode(), CPIdx
,
385 MachinePointerInfo::getConstantPool(), false, false,
389 /// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
391 SDValue
ExpandUnalignedStore(StoreSDNode
*ST
, SelectionDAG
&DAG
,
392 const TargetLowering
&TLI
) {
393 SDValue Chain
= ST
->getChain();
394 SDValue Ptr
= ST
->getBasePtr();
395 SDValue Val
= ST
->getValue();
396 EVT VT
= Val
.getValueType();
397 int Alignment
= ST
->getAlignment();
398 DebugLoc dl
= ST
->getDebugLoc();
399 if (ST
->getMemoryVT().isFloatingPoint() ||
400 ST
->getMemoryVT().isVector()) {
401 EVT intVT
= EVT::getIntegerVT(*DAG
.getContext(), VT
.getSizeInBits());
402 if (TLI
.isTypeLegal(intVT
)) {
403 // Expand to a bitconvert of the value to the integer type of the
404 // same size, then a (misaligned) int store.
405 // FIXME: Does not handle truncating floating point stores!
406 SDValue Result
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, intVT
, Val
);
407 return DAG
.getStore(Chain
, dl
, Result
, Ptr
, ST
->getPointerInfo(),
408 ST
->isVolatile(), ST
->isNonTemporal(), Alignment
);
410 // Do a (aligned) store to a stack slot, then copy from the stack slot
411 // to the final destination using (unaligned) integer loads and stores.
412 EVT StoredVT
= ST
->getMemoryVT();
414 TLI
.getRegisterType(*DAG
.getContext(),
415 EVT::getIntegerVT(*DAG
.getContext(),
416 StoredVT
.getSizeInBits()));
417 unsigned StoredBytes
= StoredVT
.getSizeInBits() / 8;
418 unsigned RegBytes
= RegVT
.getSizeInBits() / 8;
419 unsigned NumRegs
= (StoredBytes
+ RegBytes
- 1) / RegBytes
;
421 // Make sure the stack slot is also aligned for the register type.
422 SDValue StackPtr
= DAG
.CreateStackTemporary(StoredVT
, RegVT
);
424 // Perform the original store, only redirected to the stack slot.
425 SDValue Store
= DAG
.getTruncStore(Chain
, dl
,
426 Val
, StackPtr
, MachinePointerInfo(),
427 StoredVT
, false, false, 0);
428 SDValue Increment
= DAG
.getConstant(RegBytes
, TLI
.getPointerTy());
429 SmallVector
<SDValue
, 8> Stores
;
432 // Do all but one copies using the full register width.
433 for (unsigned i
= 1; i
< NumRegs
; i
++) {
434 // Load one integer register's worth from the stack slot.
435 SDValue Load
= DAG
.getLoad(RegVT
, dl
, Store
, StackPtr
,
436 MachinePointerInfo(),
438 // Store it to the final location. Remember the store.
439 Stores
.push_back(DAG
.getStore(Load
.getValue(1), dl
, Load
, Ptr
,
440 ST
->getPointerInfo().getWithOffset(Offset
),
441 ST
->isVolatile(), ST
->isNonTemporal(),
442 MinAlign(ST
->getAlignment(), Offset
)));
443 // Increment the pointers.
445 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, StackPtr
.getValueType(), StackPtr
,
447 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
, Increment
);
450 // The last store may be partial. Do a truncating store. On big-endian
451 // machines this requires an extending load from the stack slot to ensure
452 // that the bits are in the right place.
453 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(),
454 8 * (StoredBytes
- Offset
));
456 // Load from the stack slot.
457 SDValue Load
= DAG
.getExtLoad(ISD::EXTLOAD
, RegVT
, dl
, Store
, StackPtr
,
458 MachinePointerInfo(),
459 MemVT
, false, false, 0);
461 Stores
.push_back(DAG
.getTruncStore(Load
.getValue(1), dl
, Load
, Ptr
,
463 .getWithOffset(Offset
),
464 MemVT
, ST
->isVolatile(),
466 MinAlign(ST
->getAlignment(), Offset
)));
467 // The order of the stores doesn't matter - say it with a TokenFactor.
468 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, &Stores
[0],
472 assert(ST
->getMemoryVT().isInteger() &&
473 !ST
->getMemoryVT().isVector() &&
474 "Unaligned store of unknown type.");
475 // Get the half-size VT
476 EVT NewStoredVT
= ST
->getMemoryVT().getHalfSizedIntegerVT(*DAG
.getContext());
477 int NumBits
= NewStoredVT
.getSizeInBits();
478 int IncrementSize
= NumBits
/ 8;
480 // Divide the stored value in two parts.
481 SDValue ShiftAmount
= DAG
.getConstant(NumBits
, TLI
.getShiftAmountTy());
483 SDValue Hi
= DAG
.getNode(ISD::SRL
, dl
, VT
, Val
, ShiftAmount
);
485 // Store the two parts
486 SDValue Store1
, Store2
;
487 Store1
= DAG
.getTruncStore(Chain
, dl
, TLI
.isLittleEndian()?Lo
:Hi
, Ptr
,
488 ST
->getPointerInfo(), NewStoredVT
,
489 ST
->isVolatile(), ST
->isNonTemporal(), Alignment
);
490 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
491 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
492 Alignment
= MinAlign(Alignment
, IncrementSize
);
493 Store2
= DAG
.getTruncStore(Chain
, dl
, TLI
.isLittleEndian()?Hi
:Lo
, Ptr
,
494 ST
->getPointerInfo().getWithOffset(IncrementSize
),
495 NewStoredVT
, ST
->isVolatile(), ST
->isNonTemporal(),
498 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Store1
, Store2
);
501 /// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
503 SDValue
ExpandUnalignedLoad(LoadSDNode
*LD
, SelectionDAG
&DAG
,
504 const TargetLowering
&TLI
) {
505 SDValue Chain
= LD
->getChain();
506 SDValue Ptr
= LD
->getBasePtr();
507 EVT VT
= LD
->getValueType(0);
508 EVT LoadedVT
= LD
->getMemoryVT();
509 DebugLoc dl
= LD
->getDebugLoc();
510 if (VT
.isFloatingPoint() || VT
.isVector()) {
511 EVT intVT
= EVT::getIntegerVT(*DAG
.getContext(), LoadedVT
.getSizeInBits());
512 if (TLI
.isTypeLegal(intVT
)) {
513 // Expand to a (misaligned) integer load of the same size,
514 // then bitconvert to floating point or vector.
515 SDValue newLoad
= DAG
.getLoad(intVT
, dl
, Chain
, Ptr
, LD
->getPointerInfo(),
517 LD
->isNonTemporal(), LD
->getAlignment());
518 SDValue Result
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, LoadedVT
, newLoad
);
519 if (VT
.isFloatingPoint() && LoadedVT
!= VT
)
520 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
, VT
, Result
);
522 SDValue Ops
[] = { Result
, Chain
};
523 return DAG
.getMergeValues(Ops
, 2, dl
);
526 // Copy the value to a (aligned) stack slot using (unaligned) integer
527 // loads and stores, then do a (aligned) load from the stack slot.
528 EVT RegVT
= TLI
.getRegisterType(*DAG
.getContext(), intVT
);
529 unsigned LoadedBytes
= LoadedVT
.getSizeInBits() / 8;
530 unsigned RegBytes
= RegVT
.getSizeInBits() / 8;
531 unsigned NumRegs
= (LoadedBytes
+ RegBytes
- 1) / RegBytes
;
533 // Make sure the stack slot is also aligned for the register type.
534 SDValue StackBase
= DAG
.CreateStackTemporary(LoadedVT
, RegVT
);
536 SDValue Increment
= DAG
.getConstant(RegBytes
, TLI
.getPointerTy());
537 SmallVector
<SDValue
, 8> Stores
;
538 SDValue StackPtr
= StackBase
;
541 // Do all but one copies using the full register width.
542 for (unsigned i
= 1; i
< NumRegs
; i
++) {
543 // Load one integer register's worth from the original location.
544 SDValue Load
= DAG
.getLoad(RegVT
, dl
, Chain
, Ptr
,
545 LD
->getPointerInfo().getWithOffset(Offset
),
546 LD
->isVolatile(), LD
->isNonTemporal(),
547 MinAlign(LD
->getAlignment(), Offset
));
548 // Follow the load with a store to the stack slot. Remember the store.
549 Stores
.push_back(DAG
.getStore(Load
.getValue(1), dl
, Load
, StackPtr
,
550 MachinePointerInfo(), false, false, 0));
551 // Increment the pointers.
553 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
, Increment
);
554 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, StackPtr
.getValueType(), StackPtr
,
558 // The last copy may be partial. Do an extending load.
559 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(),
560 8 * (LoadedBytes
- Offset
));
561 SDValue Load
= DAG
.getExtLoad(ISD::EXTLOAD
, RegVT
, dl
, Chain
, Ptr
,
562 LD
->getPointerInfo().getWithOffset(Offset
),
563 MemVT
, LD
->isVolatile(),
565 MinAlign(LD
->getAlignment(), Offset
));
566 // Follow the load with a store to the stack slot. Remember the store.
567 // On big-endian machines this requires a truncating store to ensure
568 // that the bits end up in the right place.
569 Stores
.push_back(DAG
.getTruncStore(Load
.getValue(1), dl
, Load
, StackPtr
,
570 MachinePointerInfo(), MemVT
,
573 // The order of the stores doesn't matter - say it with a TokenFactor.
574 SDValue TF
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, &Stores
[0],
577 // Finally, perform the original load only redirected to the stack slot.
578 Load
= DAG
.getExtLoad(LD
->getExtensionType(), VT
, dl
, TF
, StackBase
,
579 MachinePointerInfo(), LoadedVT
, false, false, 0);
581 // Callers expect a MERGE_VALUES node.
582 SDValue Ops
[] = { Load
, TF
};
583 return DAG
.getMergeValues(Ops
, 2, dl
);
585 assert(LoadedVT
.isInteger() && !LoadedVT
.isVector() &&
586 "Unaligned load of unsupported type.");
588 // Compute the new VT that is half the size of the old one. This is an
590 unsigned NumBits
= LoadedVT
.getSizeInBits();
592 NewLoadedVT
= EVT::getIntegerVT(*DAG
.getContext(), NumBits
/2);
595 unsigned Alignment
= LD
->getAlignment();
596 unsigned IncrementSize
= NumBits
/ 8;
597 ISD::LoadExtType HiExtType
= LD
->getExtensionType();
599 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
600 if (HiExtType
== ISD::NON_EXTLOAD
)
601 HiExtType
= ISD::ZEXTLOAD
;
603 // Load the value in two parts
605 if (TLI
.isLittleEndian()) {
606 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, VT
, dl
, Chain
, Ptr
, LD
->getPointerInfo(),
607 NewLoadedVT
, LD
->isVolatile(),
608 LD
->isNonTemporal(), Alignment
);
609 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
610 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
611 Hi
= DAG
.getExtLoad(HiExtType
, VT
, dl
, Chain
, Ptr
,
612 LD
->getPointerInfo().getWithOffset(IncrementSize
),
613 NewLoadedVT
, LD
->isVolatile(),
614 LD
->isNonTemporal(), MinAlign(Alignment
,IncrementSize
));
616 Hi
= DAG
.getExtLoad(HiExtType
, VT
, dl
, Chain
, Ptr
, LD
->getPointerInfo(),
617 NewLoadedVT
, LD
->isVolatile(),
618 LD
->isNonTemporal(), Alignment
);
619 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
620 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
621 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, VT
, dl
, Chain
, Ptr
,
622 LD
->getPointerInfo().getWithOffset(IncrementSize
),
623 NewLoadedVT
, LD
->isVolatile(),
624 LD
->isNonTemporal(), MinAlign(Alignment
,IncrementSize
));
627 // aggregate the two parts
628 SDValue ShiftAmount
= DAG
.getConstant(NumBits
, TLI
.getShiftAmountTy());
629 SDValue Result
= DAG
.getNode(ISD::SHL
, dl
, VT
, Hi
, ShiftAmount
);
630 Result
= DAG
.getNode(ISD::OR
, dl
, VT
, Result
, Lo
);
632 SDValue TF
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
635 SDValue Ops
[] = { Result
, TF
};
636 return DAG
.getMergeValues(Ops
, 2, dl
);
639 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable
640 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
641 /// is necessary to spill the vector being inserted into to memory, perform
642 /// the insert there, and then read the result back.
643 SDValue
SelectionDAGLegalize::
644 PerformInsertVectorEltInMemory(SDValue Vec
, SDValue Val
, SDValue Idx
,
650 // If the target doesn't support this, we have to spill the input vector
651 // to a temporary stack slot, update the element, then reload it. This is
652 // badness. We could also load the value into a vector register (either
653 // with a "move to register" or "extload into register" instruction, then
654 // permute it into place, if the idx is a constant and if the idx is
655 // supported by the target.
656 EVT VT
= Tmp1
.getValueType();
657 EVT EltVT
= VT
.getVectorElementType();
658 EVT IdxVT
= Tmp3
.getValueType();
659 EVT PtrVT
= TLI
.getPointerTy();
660 SDValue StackPtr
= DAG
.CreateStackTemporary(VT
);
662 int SPFI
= cast
<FrameIndexSDNode
>(StackPtr
.getNode())->getIndex();
665 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Tmp1
, StackPtr
,
666 MachinePointerInfo::getFixedStack(SPFI
),
669 // Truncate or zero extend offset to target pointer type.
670 unsigned CastOpc
= IdxVT
.bitsGT(PtrVT
) ? ISD::TRUNCATE
: ISD::ZERO_EXTEND
;
671 Tmp3
= DAG
.getNode(CastOpc
, dl
, PtrVT
, Tmp3
);
672 // Add the offset to the index.
673 unsigned EltSize
= EltVT
.getSizeInBits()/8;
674 Tmp3
= DAG
.getNode(ISD::MUL
, dl
, IdxVT
, Tmp3
,DAG
.getConstant(EltSize
, IdxVT
));
675 SDValue StackPtr2
= DAG
.getNode(ISD::ADD
, dl
, IdxVT
, Tmp3
, StackPtr
);
676 // Store the scalar value.
677 Ch
= DAG
.getTruncStore(Ch
, dl
, Tmp2
, StackPtr2
, MachinePointerInfo(), EltVT
,
679 // Load the updated vector.
680 return DAG
.getLoad(VT
, dl
, Ch
, StackPtr
,
681 MachinePointerInfo::getFixedStack(SPFI
), false, false, 0);
685 SDValue
SelectionDAGLegalize::
686 ExpandINSERT_VECTOR_ELT(SDValue Vec
, SDValue Val
, SDValue Idx
, DebugLoc dl
) {
687 if (ConstantSDNode
*InsertPos
= dyn_cast
<ConstantSDNode
>(Idx
)) {
688 // SCALAR_TO_VECTOR requires that the type of the value being inserted
689 // match the element type of the vector being created, except for
690 // integers in which case the inserted value can be over width.
691 EVT EltVT
= Vec
.getValueType().getVectorElementType();
692 if (Val
.getValueType() == EltVT
||
693 (EltVT
.isInteger() && Val
.getValueType().bitsGE(EltVT
))) {
694 SDValue ScVec
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
,
695 Vec
.getValueType(), Val
);
697 unsigned NumElts
= Vec
.getValueType().getVectorNumElements();
698 // We generate a shuffle of InVec and ScVec, so the shuffle mask
699 // should be 0,1,2,3,4,5... with the appropriate element replaced with
701 SmallVector
<int, 8> ShufOps
;
702 for (unsigned i
= 0; i
!= NumElts
; ++i
)
703 ShufOps
.push_back(i
!= InsertPos
->getZExtValue() ? i
: NumElts
);
705 return DAG
.getVectorShuffle(Vec
.getValueType(), dl
, Vec
, ScVec
,
709 return PerformInsertVectorEltInMemory(Vec
, Val
, Idx
, dl
);
712 SDValue
SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode
* ST
) {
713 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
714 // FIXME: We shouldn't do this for TargetConstantFP's.
715 // FIXME: move this to the DAG Combiner! Note that we can't regress due
716 // to phase ordering between legalized code and the dag combiner. This
717 // probably means that we need to integrate dag combiner and legalizer
719 // We generally can't do this one for long doubles.
720 SDValue Tmp1
= ST
->getChain();
721 SDValue Tmp2
= ST
->getBasePtr();
723 unsigned Alignment
= ST
->getAlignment();
724 bool isVolatile
= ST
->isVolatile();
725 bool isNonTemporal
= ST
->isNonTemporal();
726 DebugLoc dl
= ST
->getDebugLoc();
727 if (ConstantFPSDNode
*CFP
= dyn_cast
<ConstantFPSDNode
>(ST
->getValue())) {
728 if (CFP
->getValueType(0) == MVT::f32
&&
729 getTypeAction(MVT::i32
) == Legal
) {
730 Tmp3
= DAG
.getConstant(CFP
->getValueAPF().
731 bitcastToAPInt().zextOrTrunc(32),
733 return DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
734 isVolatile
, isNonTemporal
, Alignment
);
737 if (CFP
->getValueType(0) == MVT::f64
) {
738 // If this target supports 64-bit registers, do a single 64-bit store.
739 if (getTypeAction(MVT::i64
) == Legal
) {
740 Tmp3
= DAG
.getConstant(CFP
->getValueAPF().bitcastToAPInt().
741 zextOrTrunc(64), MVT::i64
);
742 return DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
743 isVolatile
, isNonTemporal
, Alignment
);
746 if (getTypeAction(MVT::i32
) == Legal
&& !ST
->isVolatile()) {
747 // Otherwise, if the target supports 32-bit registers, use 2 32-bit
748 // stores. If the target supports neither 32- nor 64-bits, this
749 // xform is certainly not worth it.
750 const APInt
&IntVal
=CFP
->getValueAPF().bitcastToAPInt();
751 SDValue Lo
= DAG
.getConstant(APInt(IntVal
).trunc(32), MVT::i32
);
752 SDValue Hi
= DAG
.getConstant(IntVal
.lshr(32).trunc(32), MVT::i32
);
753 if (TLI
.isBigEndian()) std::swap(Lo
, Hi
);
755 Lo
= DAG
.getStore(Tmp1
, dl
, Lo
, Tmp2
, ST
->getPointerInfo(), isVolatile
,
756 isNonTemporal
, Alignment
);
757 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
758 DAG
.getIntPtrConstant(4));
759 Hi
= DAG
.getStore(Tmp1
, dl
, Hi
, Tmp2
,
760 ST
->getPointerInfo().getWithOffset(4),
761 isVolatile
, isNonTemporal
, MinAlign(Alignment
, 4U));
763 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
, Hi
);
770 /// LegalizeOp - We know that the specified value has a legal type, and
771 /// that its operands are legal. Now ensure that the operation itself
772 /// is legal, recursively ensuring that the operands' operations remain
774 SDValue
SelectionDAGLegalize::LegalizeOp(SDValue Op
) {
775 if (Op
.getOpcode() == ISD::TargetConstant
) // Allow illegal target nodes.
778 SDNode
*Node
= Op
.getNode();
779 DebugLoc dl
= Node
->getDebugLoc();
781 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
782 assert(getTypeAction(Node
->getValueType(i
)) == Legal
&&
783 "Unexpected illegal type!");
785 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
)
786 assert((isTypeLegal(Node
->getOperand(i
).getValueType()) ||
787 Node
->getOperand(i
).getOpcode() == ISD::TargetConstant
) &&
788 "Unexpected illegal type!");
790 // Note that LegalizeOp may be reentered even from single-use nodes, which
791 // means that we always must cache transformed nodes.
792 DenseMap
<SDValue
, SDValue
>::iterator I
= LegalizedNodes
.find(Op
);
793 if (I
!= LegalizedNodes
.end()) return I
->second
;
795 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
;
797 bool isCustom
= false;
799 // Figure out the correct action; the way to query this varies by opcode
800 TargetLowering::LegalizeAction Action
;
801 bool SimpleFinishLegalizing
= true;
802 switch (Node
->getOpcode()) {
803 case ISD::INTRINSIC_W_CHAIN
:
804 case ISD::INTRINSIC_WO_CHAIN
:
805 case ISD::INTRINSIC_VOID
:
808 Action
= TLI
.getOperationAction(Node
->getOpcode(), MVT::Other
);
810 case ISD::SINT_TO_FP
:
811 case ISD::UINT_TO_FP
:
812 case ISD::EXTRACT_VECTOR_ELT
:
813 Action
= TLI
.getOperationAction(Node
->getOpcode(),
814 Node
->getOperand(0).getValueType());
816 case ISD::FP_ROUND_INREG
:
817 case ISD::SIGN_EXTEND_INREG
: {
818 EVT InnerType
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
819 Action
= TLI
.getOperationAction(Node
->getOpcode(), InnerType
);
825 unsigned CCOperand
= Node
->getOpcode() == ISD::SELECT_CC
? 4 :
826 Node
->getOpcode() == ISD::SETCC
? 2 : 1;
827 unsigned CompareOperand
= Node
->getOpcode() == ISD::BR_CC
? 2 : 0;
828 EVT OpVT
= Node
->getOperand(CompareOperand
).getValueType();
829 ISD::CondCode CCCode
=
830 cast
<CondCodeSDNode
>(Node
->getOperand(CCOperand
))->get();
831 Action
= TLI
.getCondCodeAction(CCCode
, OpVT
);
832 if (Action
== TargetLowering::Legal
) {
833 if (Node
->getOpcode() == ISD::SELECT_CC
)
834 Action
= TLI
.getOperationAction(Node
->getOpcode(),
835 Node
->getValueType(0));
837 Action
= TLI
.getOperationAction(Node
->getOpcode(), OpVT
);
843 // FIXME: Model these properly. LOAD and STORE are complicated, and
844 // STORE expects the unlegalized operand in some cases.
845 SimpleFinishLegalizing
= false;
847 case ISD::CALLSEQ_START
:
848 case ISD::CALLSEQ_END
:
849 // FIXME: This shouldn't be necessary. These nodes have special properties
850 // dealing with the recursive nature of legalization. Removing this
851 // special case should be done as part of making LegalizeDAG non-recursive.
852 SimpleFinishLegalizing
= false;
854 case ISD::EXTRACT_ELEMENT
:
855 case ISD::FLT_ROUNDS_
:
863 case ISD::MERGE_VALUES
:
865 case ISD::FRAME_TO_ARGS_OFFSET
:
866 case ISD::EH_SJLJ_SETJMP
:
867 case ISD::EH_SJLJ_LONGJMP
:
868 case ISD::EH_SJLJ_DISPATCHSETUP
:
869 // These operations lie about being legal: when they claim to be legal,
870 // they should actually be expanded.
871 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
872 if (Action
== TargetLowering::Legal
)
873 Action
= TargetLowering::Expand
;
875 case ISD::TRAMPOLINE
:
877 case ISD::RETURNADDR
:
878 // These operations lie about being legal: when they claim to be legal,
879 // they should actually be custom-lowered.
880 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
881 if (Action
== TargetLowering::Legal
)
882 Action
= TargetLowering::Custom
;
884 case ISD::BUILD_VECTOR
:
885 // A weird case: legalization for BUILD_VECTOR never legalizes the
887 // FIXME: This really sucks... changing it isn't semantically incorrect,
888 // but it massively pessimizes the code for floating-point BUILD_VECTORs
889 // because ConstantFP operands get legalized into constant pool loads
890 // before the BUILD_VECTOR code can see them. It doesn't usually bite,
891 // though, because BUILD_VECTORS usually get lowered into other nodes
892 // which get legalized properly.
893 SimpleFinishLegalizing
= false;
896 if (Node
->getOpcode() >= ISD::BUILTIN_OP_END
) {
897 Action
= TargetLowering::Legal
;
899 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
904 if (SimpleFinishLegalizing
) {
905 SmallVector
<SDValue
, 8> Ops
, ResultVals
;
906 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
)
907 Ops
.push_back(LegalizeOp(Node
->getOperand(i
)));
908 switch (Node
->getOpcode()) {
915 // Branches tweak the chain to include LastCALLSEQ_END
916 Ops
[0] = DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Ops
[0],
918 Ops
[0] = LegalizeOp(Ops
[0]);
919 LastCALLSEQ_END
= DAG
.getEntryNode();
926 // Legalizing shifts/rotates requires adjusting the shift amount
927 // to the appropriate width.
928 if (!Ops
[1].getValueType().isVector())
929 Ops
[1] = LegalizeOp(DAG
.getShiftAmountOperand(Ops
[1]));
934 // Legalizing shifts/rotates requires adjusting the shift amount
935 // to the appropriate width.
936 if (!Ops
[2].getValueType().isVector())
937 Ops
[2] = LegalizeOp(DAG
.getShiftAmountOperand(Ops
[2]));
941 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(), Ops
.data(),
944 case TargetLowering::Legal
:
945 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
946 ResultVals
.push_back(Result
.getValue(i
));
948 case TargetLowering::Custom
:
949 // FIXME: The handling for custom lowering with multiple results is
951 Tmp1
= TLI
.LowerOperation(Result
, DAG
);
952 if (Tmp1
.getNode()) {
953 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
) {
955 ResultVals
.push_back(Tmp1
);
957 ResultVals
.push_back(Tmp1
.getValue(i
));
963 case TargetLowering::Expand
:
964 ExpandNode(Result
.getNode(), ResultVals
);
966 case TargetLowering::Promote
:
967 PromoteNode(Result
.getNode(), ResultVals
);
970 if (!ResultVals
.empty()) {
971 for (unsigned i
= 0, e
= ResultVals
.size(); i
!= e
; ++i
) {
972 if (ResultVals
[i
] != SDValue(Node
, i
))
973 ResultVals
[i
] = LegalizeOp(ResultVals
[i
]);
974 AddLegalizedOperand(SDValue(Node
, i
), ResultVals
[i
]);
976 return ResultVals
[Op
.getResNo()];
980 switch (Node
->getOpcode()) {
987 assert(0 && "Do not know how to legalize this operator!");
989 case ISD::BUILD_VECTOR
:
990 switch (TLI
.getOperationAction(ISD::BUILD_VECTOR
, Node
->getValueType(0))) {
991 default: assert(0 && "This action is not supported yet!");
992 case TargetLowering::Custom
:
993 Tmp3
= TLI
.LowerOperation(Result
, DAG
);
994 if (Tmp3
.getNode()) {
999 case TargetLowering::Expand
:
1000 Result
= ExpandBUILD_VECTOR(Result
.getNode());
1004 case ISD::CALLSEQ_START
: {
1005 SDNode
*CallEnd
= FindCallEndFromCallStart(Node
);
1007 // Recursively Legalize all of the inputs of the call end that do not lead
1008 // to this call start. This ensures that any libcalls that need be inserted
1009 // are inserted *before* the CALLSEQ_START.
1010 {SmallPtrSet
<SDNode
*, 32> NodesLeadingTo
;
1011 for (unsigned i
= 0, e
= CallEnd
->getNumOperands(); i
!= e
; ++i
)
1012 LegalizeAllNodesNotLeadingTo(CallEnd
->getOperand(i
).getNode(), Node
,
1016 // Now that we have legalized all of the inputs (which may have inserted
1017 // libcalls), create the new CALLSEQ_START node.
1018 Tmp1
= LegalizeOp(Node
->getOperand(0)); // Legalize the chain.
1020 // Merge in the last call to ensure that this call starts after the last
1022 if (LastCALLSEQ_END
.getOpcode() != ISD::EntryToken
) {
1023 Tmp1
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1024 Tmp1
, LastCALLSEQ_END
);
1025 Tmp1
= LegalizeOp(Tmp1
);
1028 // Do not try to legalize the target-specific arguments (#1+).
1029 if (Tmp1
!= Node
->getOperand(0)) {
1030 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1032 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(), &Ops
[0],
1033 Ops
.size()), Result
.getResNo());
1036 // Remember that the CALLSEQ_START is legalized.
1037 AddLegalizedOperand(Op
.getValue(0), Result
);
1038 if (Node
->getNumValues() == 2) // If this has a flag result, remember it.
1039 AddLegalizedOperand(Op
.getValue(1), Result
.getValue(1));
1041 // Now that the callseq_start and all of the non-call nodes above this call
1042 // sequence have been legalized, legalize the call itself. During this
1043 // process, no libcalls can/will be inserted, guaranteeing that no calls
1045 assert(!IsLegalizingCall
&& "Inconsistent sequentialization of calls!");
1046 // Note that we are selecting this call!
1047 LastCALLSEQ_END
= SDValue(CallEnd
, 0);
1048 IsLegalizingCall
= true;
1050 // Legalize the call, starting from the CALLSEQ_END.
1051 LegalizeOp(LastCALLSEQ_END
);
1052 assert(!IsLegalizingCall
&& "CALLSEQ_END should have cleared this!");
1055 case ISD::CALLSEQ_END
:
1056 // If the CALLSEQ_START node hasn't been legalized first, legalize it. This
1057 // will cause this node to be legalized as well as handling libcalls right.
1058 if (LastCALLSEQ_END
.getNode() != Node
) {
1059 LegalizeOp(SDValue(FindCallStartFromCallEnd(Node
), 0));
1060 DenseMap
<SDValue
, SDValue
>::iterator I
= LegalizedNodes
.find(Op
);
1061 assert(I
!= LegalizedNodes
.end() &&
1062 "Legalizing the call start should have legalized this node!");
1066 // Otherwise, the call start has been legalized and everything is going
1067 // according to plan. Just legalize ourselves normally here.
1068 Tmp1
= LegalizeOp(Node
->getOperand(0)); // Legalize the chain.
1069 // Do not try to legalize the target-specific arguments (#1+), except for
1070 // an optional flag input.
1071 if (Node
->getOperand(Node
->getNumOperands()-1).getValueType() != MVT::Flag
){
1072 if (Tmp1
!= Node
->getOperand(0)) {
1073 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1075 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1076 &Ops
[0], Ops
.size()),
1080 Tmp2
= LegalizeOp(Node
->getOperand(Node
->getNumOperands()-1));
1081 if (Tmp1
!= Node
->getOperand(0) ||
1082 Tmp2
!= Node
->getOperand(Node
->getNumOperands()-1)) {
1083 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1086 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1087 &Ops
[0], Ops
.size()),
1091 assert(IsLegalizingCall
&& "Call sequence imbalance between start/end?");
1092 // This finishes up call legalization.
1093 IsLegalizingCall
= false;
1095 // If the CALLSEQ_END node has a flag, remember that we legalized it.
1096 AddLegalizedOperand(SDValue(Node
, 0), Result
.getValue(0));
1097 if (Node
->getNumValues() == 2)
1098 AddLegalizedOperand(SDValue(Node
, 1), Result
.getValue(1));
1099 return Result
.getValue(Op
.getResNo());
1101 LoadSDNode
*LD
= cast
<LoadSDNode
>(Node
);
1102 Tmp1
= LegalizeOp(LD
->getChain()); // Legalize the chain.
1103 Tmp2
= LegalizeOp(LD
->getBasePtr()); // Legalize the base pointer.
1105 ISD::LoadExtType ExtType
= LD
->getExtensionType();
1106 if (ExtType
== ISD::NON_EXTLOAD
) {
1107 EVT VT
= Node
->getValueType(0);
1108 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1109 Tmp1
, Tmp2
, LD
->getOffset()),
1111 Tmp3
= Result
.getValue(0);
1112 Tmp4
= Result
.getValue(1);
1114 switch (TLI
.getOperationAction(Node
->getOpcode(), VT
)) {
1115 default: assert(0 && "This action is not supported yet!");
1116 case TargetLowering::Legal
:
1117 // If this is an unaligned load and the target doesn't support it,
1119 if (!TLI
.allowsUnalignedMemoryAccesses(LD
->getMemoryVT())) {
1120 const Type
*Ty
= LD
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1121 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1122 if (LD
->getAlignment() < ABIAlignment
){
1123 Result
= ExpandUnalignedLoad(cast
<LoadSDNode
>(Result
.getNode()),
1125 Tmp3
= Result
.getOperand(0);
1126 Tmp4
= Result
.getOperand(1);
1127 Tmp3
= LegalizeOp(Tmp3
);
1128 Tmp4
= LegalizeOp(Tmp4
);
1132 case TargetLowering::Custom
:
1133 Tmp1
= TLI
.LowerOperation(Tmp3
, DAG
);
1134 if (Tmp1
.getNode()) {
1135 Tmp3
= LegalizeOp(Tmp1
);
1136 Tmp4
= LegalizeOp(Tmp1
.getValue(1));
1139 case TargetLowering::Promote
: {
1140 // Only promote a load of vector type to another.
1141 assert(VT
.isVector() && "Cannot promote this load!");
1142 // Change base type to a different vector type.
1143 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), VT
);
1145 Tmp1
= DAG
.getLoad(NVT
, dl
, Tmp1
, Tmp2
, LD
->getPointerInfo(),
1146 LD
->isVolatile(), LD
->isNonTemporal(),
1147 LD
->getAlignment());
1148 Tmp3
= LegalizeOp(DAG
.getNode(ISD::BIT_CONVERT
, dl
, VT
, Tmp1
));
1149 Tmp4
= LegalizeOp(Tmp1
.getValue(1));
1153 // Since loads produce two values, make sure to remember that we
1154 // legalized both of them.
1155 AddLegalizedOperand(SDValue(Node
, 0), Tmp3
);
1156 AddLegalizedOperand(SDValue(Node
, 1), Tmp4
);
1157 return Op
.getResNo() ? Tmp4
: Tmp3
;
1160 EVT SrcVT
= LD
->getMemoryVT();
1161 unsigned SrcWidth
= SrcVT
.getSizeInBits();
1162 unsigned Alignment
= LD
->getAlignment();
1163 bool isVolatile
= LD
->isVolatile();
1164 bool isNonTemporal
= LD
->isNonTemporal();
1166 if (SrcWidth
!= SrcVT
.getStoreSizeInBits() &&
1167 // Some targets pretend to have an i1 loading operation, and actually
1168 // load an i8. This trick is correct for ZEXTLOAD because the top 7
1169 // bits are guaranteed to be zero; it helps the optimizers understand
1170 // that these bits are zero. It is also useful for EXTLOAD, since it
1171 // tells the optimizers that those bits are undefined. It would be
1172 // nice to have an effective generic way of getting these benefits...
1173 // Until such a way is found, don't insist on promoting i1 here.
1174 (SrcVT
!= MVT::i1
||
1175 TLI
.getLoadExtAction(ExtType
, MVT::i1
) == TargetLowering::Promote
)) {
1176 // Promote to a byte-sized load if not loading an integral number of
1177 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
1178 unsigned NewWidth
= SrcVT
.getStoreSizeInBits();
1179 EVT NVT
= EVT::getIntegerVT(*DAG
.getContext(), NewWidth
);
1182 // The extra bits are guaranteed to be zero, since we stored them that
1183 // way. A zext load from NVT thus automatically gives zext from SrcVT.
1185 ISD::LoadExtType NewExtType
=
1186 ExtType
== ISD::ZEXTLOAD
? ISD::ZEXTLOAD
: ISD::EXTLOAD
;
1188 Result
= DAG
.getExtLoad(NewExtType
, Node
->getValueType(0), dl
,
1189 Tmp1
, Tmp2
, LD
->getPointerInfo(),
1190 NVT
, isVolatile
, isNonTemporal
, Alignment
);
1192 Ch
= Result
.getValue(1); // The chain.
1194 if (ExtType
== ISD::SEXTLOAD
)
1195 // Having the top bits zero doesn't help when sign extending.
1196 Result
= DAG
.getNode(ISD::SIGN_EXTEND_INREG
, dl
,
1197 Result
.getValueType(),
1198 Result
, DAG
.getValueType(SrcVT
));
1199 else if (ExtType
== ISD::ZEXTLOAD
|| NVT
== Result
.getValueType())
1200 // All the top bits are guaranteed to be zero - inform the optimizers.
1201 Result
= DAG
.getNode(ISD::AssertZext
, dl
,
1202 Result
.getValueType(), Result
,
1203 DAG
.getValueType(SrcVT
));
1205 Tmp1
= LegalizeOp(Result
);
1206 Tmp2
= LegalizeOp(Ch
);
1207 } else if (SrcWidth
& (SrcWidth
- 1)) {
1208 // If not loading a power-of-2 number of bits, expand as two loads.
1209 assert(!SrcVT
.isVector() && "Unsupported extload!");
1210 unsigned RoundWidth
= 1 << Log2_32(SrcWidth
);
1211 assert(RoundWidth
< SrcWidth
);
1212 unsigned ExtraWidth
= SrcWidth
- RoundWidth
;
1213 assert(ExtraWidth
< RoundWidth
);
1214 assert(!(RoundWidth
% 8) && !(ExtraWidth
% 8) &&
1215 "Load size not an integral number of bytes!");
1216 EVT RoundVT
= EVT::getIntegerVT(*DAG
.getContext(), RoundWidth
);
1217 EVT ExtraVT
= EVT::getIntegerVT(*DAG
.getContext(), ExtraWidth
);
1219 unsigned IncrementSize
;
1221 if (TLI
.isLittleEndian()) {
1222 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
1223 // Load the bottom RoundWidth bits.
1224 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, Node
->getValueType(0), dl
,
1226 LD
->getPointerInfo(), RoundVT
, isVolatile
,
1227 isNonTemporal
, Alignment
);
1229 // Load the remaining ExtraWidth bits.
1230 IncrementSize
= RoundWidth
/ 8;
1231 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1232 DAG
.getIntPtrConstant(IncrementSize
));
1233 Hi
= DAG
.getExtLoad(ExtType
, Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1234 LD
->getPointerInfo().getWithOffset(IncrementSize
),
1235 ExtraVT
, isVolatile
, isNonTemporal
,
1236 MinAlign(Alignment
, IncrementSize
));
1238 // Build a factor node to remember that this load is independent of
1240 Ch
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
1243 // Move the top bits to the right place.
1244 Hi
= DAG
.getNode(ISD::SHL
, dl
, Hi
.getValueType(), Hi
,
1245 DAG
.getConstant(RoundWidth
, TLI
.getShiftAmountTy()));
1247 // Join the hi and lo parts.
1248 Result
= DAG
.getNode(ISD::OR
, dl
, Node
->getValueType(0), Lo
, Hi
);
1250 // Big endian - avoid unaligned loads.
1251 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
1252 // Load the top RoundWidth bits.
1253 Hi
= DAG
.getExtLoad(ExtType
, Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1254 LD
->getPointerInfo(), RoundVT
, isVolatile
,
1255 isNonTemporal
, Alignment
);
1257 // Load the remaining ExtraWidth bits.
1258 IncrementSize
= RoundWidth
/ 8;
1259 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1260 DAG
.getIntPtrConstant(IncrementSize
));
1261 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
,
1262 Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1263 LD
->getPointerInfo().getWithOffset(IncrementSize
),
1264 ExtraVT
, isVolatile
, isNonTemporal
,
1265 MinAlign(Alignment
, IncrementSize
));
1267 // Build a factor node to remember that this load is independent of
1269 Ch
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
1272 // Move the top bits to the right place.
1273 Hi
= DAG
.getNode(ISD::SHL
, dl
, Hi
.getValueType(), Hi
,
1274 DAG
.getConstant(ExtraWidth
, TLI
.getShiftAmountTy()));
1276 // Join the hi and lo parts.
1277 Result
= DAG
.getNode(ISD::OR
, dl
, Node
->getValueType(0), Lo
, Hi
);
1280 Tmp1
= LegalizeOp(Result
);
1281 Tmp2
= LegalizeOp(Ch
);
1283 switch (TLI
.getLoadExtAction(ExtType
, SrcVT
)) {
1284 default: assert(0 && "This action is not supported yet!");
1285 case TargetLowering::Custom
:
1288 case TargetLowering::Legal
:
1289 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1290 Tmp1
, Tmp2
, LD
->getOffset()),
1292 Tmp1
= Result
.getValue(0);
1293 Tmp2
= Result
.getValue(1);
1296 Tmp3
= TLI
.LowerOperation(Result
, DAG
);
1297 if (Tmp3
.getNode()) {
1298 Tmp1
= LegalizeOp(Tmp3
);
1299 Tmp2
= LegalizeOp(Tmp3
.getValue(1));
1302 // If this is an unaligned load and the target doesn't support it,
1304 if (!TLI
.allowsUnalignedMemoryAccesses(LD
->getMemoryVT())) {
1306 LD
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1307 unsigned ABIAlignment
=
1308 TLI
.getTargetData()->getABITypeAlignment(Ty
);
1309 if (LD
->getAlignment() < ABIAlignment
){
1310 Result
= ExpandUnalignedLoad(cast
<LoadSDNode
>(Result
.getNode()),
1312 Tmp1
= Result
.getOperand(0);
1313 Tmp2
= Result
.getOperand(1);
1314 Tmp1
= LegalizeOp(Tmp1
);
1315 Tmp2
= LegalizeOp(Tmp2
);
1320 case TargetLowering::Expand
:
1321 if (!TLI
.isLoadExtLegal(ISD::EXTLOAD
, SrcVT
) && isTypeLegal(SrcVT
)) {
1322 SDValue Load
= DAG
.getLoad(SrcVT
, dl
, Tmp1
, Tmp2
,
1323 LD
->getPointerInfo(),
1324 LD
->isVolatile(), LD
->isNonTemporal(),
1325 LD
->getAlignment());
1329 ExtendOp
= (SrcVT
.isFloatingPoint() ?
1330 ISD::FP_EXTEND
: ISD::ANY_EXTEND
);
1332 case ISD::SEXTLOAD
: ExtendOp
= ISD::SIGN_EXTEND
; break;
1333 case ISD::ZEXTLOAD
: ExtendOp
= ISD::ZERO_EXTEND
; break;
1334 default: llvm_unreachable("Unexpected extend load type!");
1336 Result
= DAG
.getNode(ExtendOp
, dl
, Node
->getValueType(0), Load
);
1337 Tmp1
= LegalizeOp(Result
); // Relegalize new nodes.
1338 Tmp2
= LegalizeOp(Load
.getValue(1));
1341 // FIXME: This does not work for vectors on most targets. Sign- and
1342 // zero-extend operations are currently folded into extending loads,
1343 // whether they are legal or not, and then we end up here without any
1344 // support for legalizing them.
1345 assert(ExtType
!= ISD::EXTLOAD
&&
1346 "EXTLOAD should always be supported!");
1347 // Turn the unsupported load into an EXTLOAD followed by an explicit
1348 // zero/sign extend inreg.
1349 Result
= DAG
.getExtLoad(ISD::EXTLOAD
, Node
->getValueType(0), dl
,
1350 Tmp1
, Tmp2
, LD
->getPointerInfo(), SrcVT
,
1351 LD
->isVolatile(), LD
->isNonTemporal(),
1352 LD
->getAlignment());
1354 if (ExtType
== ISD::SEXTLOAD
)
1355 ValRes
= DAG
.getNode(ISD::SIGN_EXTEND_INREG
, dl
,
1356 Result
.getValueType(),
1357 Result
, DAG
.getValueType(SrcVT
));
1359 ValRes
= DAG
.getZeroExtendInReg(Result
, dl
, SrcVT
);
1360 Tmp1
= LegalizeOp(ValRes
); // Relegalize new nodes.
1361 Tmp2
= LegalizeOp(Result
.getValue(1)); // Relegalize new nodes.
1366 // Since loads produce two values, make sure to remember that we legalized
1368 AddLegalizedOperand(SDValue(Node
, 0), Tmp1
);
1369 AddLegalizedOperand(SDValue(Node
, 1), Tmp2
);
1370 return Op
.getResNo() ? Tmp2
: Tmp1
;
1373 StoreSDNode
*ST
= cast
<StoreSDNode
>(Node
);
1374 Tmp1
= LegalizeOp(ST
->getChain()); // Legalize the chain.
1375 Tmp2
= LegalizeOp(ST
->getBasePtr()); // Legalize the pointer.
1376 unsigned Alignment
= ST
->getAlignment();
1377 bool isVolatile
= ST
->isVolatile();
1378 bool isNonTemporal
= ST
->isNonTemporal();
1380 if (!ST
->isTruncatingStore()) {
1381 if (SDNode
*OptStore
= OptimizeFloatStore(ST
).getNode()) {
1382 Result
= SDValue(OptStore
, 0);
1387 Tmp3
= LegalizeOp(ST
->getValue());
1388 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1393 EVT VT
= Tmp3
.getValueType();
1394 switch (TLI
.getOperationAction(ISD::STORE
, VT
)) {
1395 default: assert(0 && "This action is not supported yet!");
1396 case TargetLowering::Legal
:
1397 // If this is an unaligned store and the target doesn't support it,
1399 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1400 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1401 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1402 if (ST
->getAlignment() < ABIAlignment
)
1403 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1407 case TargetLowering::Custom
:
1408 Tmp1
= TLI
.LowerOperation(Result
, DAG
);
1409 if (Tmp1
.getNode()) Result
= Tmp1
;
1411 case TargetLowering::Promote
:
1412 assert(VT
.isVector() && "Unknown legal promote case!");
1413 Tmp3
= DAG
.getNode(ISD::BIT_CONVERT
, dl
,
1414 TLI
.getTypeToPromoteTo(ISD::STORE
, VT
), Tmp3
);
1415 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
,
1416 ST
->getPointerInfo(), isVolatile
,
1417 isNonTemporal
, Alignment
);
1423 Tmp3
= LegalizeOp(ST
->getValue());
1425 EVT StVT
= ST
->getMemoryVT();
1426 unsigned StWidth
= StVT
.getSizeInBits();
1428 if (StWidth
!= StVT
.getStoreSizeInBits()) {
1429 // Promote to a byte-sized store with upper bits zero if not
1430 // storing an integral number of bytes. For example, promote
1431 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
1432 EVT NVT
= EVT::getIntegerVT(*DAG
.getContext(),
1433 StVT
.getStoreSizeInBits());
1434 Tmp3
= DAG
.getZeroExtendInReg(Tmp3
, dl
, StVT
);
1435 Result
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
1436 NVT
, isVolatile
, isNonTemporal
, Alignment
);
1437 } else if (StWidth
& (StWidth
- 1)) {
1438 // If not storing a power-of-2 number of bits, expand as two stores.
1439 assert(!StVT
.isVector() && "Unsupported truncstore!");
1440 unsigned RoundWidth
= 1 << Log2_32(StWidth
);
1441 assert(RoundWidth
< StWidth
);
1442 unsigned ExtraWidth
= StWidth
- RoundWidth
;
1443 assert(ExtraWidth
< RoundWidth
);
1444 assert(!(RoundWidth
% 8) && !(ExtraWidth
% 8) &&
1445 "Store size not an integral number of bytes!");
1446 EVT RoundVT
= EVT::getIntegerVT(*DAG
.getContext(), RoundWidth
);
1447 EVT ExtraVT
= EVT::getIntegerVT(*DAG
.getContext(), ExtraWidth
);
1449 unsigned IncrementSize
;
1451 if (TLI
.isLittleEndian()) {
1452 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
1453 // Store the bottom RoundWidth bits.
1454 Lo
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
1456 isVolatile
, isNonTemporal
, Alignment
);
1458 // Store the remaining ExtraWidth bits.
1459 IncrementSize
= RoundWidth
/ 8;
1460 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1461 DAG
.getIntPtrConstant(IncrementSize
));
1462 Hi
= DAG
.getNode(ISD::SRL
, dl
, Tmp3
.getValueType(), Tmp3
,
1463 DAG
.getConstant(RoundWidth
, TLI
.getShiftAmountTy()));
1464 Hi
= DAG
.getTruncStore(Tmp1
, dl
, Hi
, Tmp2
,
1465 ST
->getPointerInfo().getWithOffset(IncrementSize
),
1466 ExtraVT
, isVolatile
, isNonTemporal
,
1467 MinAlign(Alignment
, IncrementSize
));
1469 // Big endian - avoid unaligned stores.
1470 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
1471 // Store the top RoundWidth bits.
1472 Hi
= DAG
.getNode(ISD::SRL
, dl
, Tmp3
.getValueType(), Tmp3
,
1473 DAG
.getConstant(ExtraWidth
, TLI
.getShiftAmountTy()));
1474 Hi
= DAG
.getTruncStore(Tmp1
, dl
, Hi
, Tmp2
, ST
->getPointerInfo(),
1475 RoundVT
, isVolatile
, isNonTemporal
, Alignment
);
1477 // Store the remaining ExtraWidth bits.
1478 IncrementSize
= RoundWidth
/ 8;
1479 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1480 DAG
.getIntPtrConstant(IncrementSize
));
1481 Lo
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
,
1482 ST
->getPointerInfo().getWithOffset(IncrementSize
),
1483 ExtraVT
, isVolatile
, isNonTemporal
,
1484 MinAlign(Alignment
, IncrementSize
));
1487 // The order of the stores doesn't matter.
1488 Result
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
, Hi
);
1490 if (Tmp1
!= ST
->getChain() || Tmp3
!= ST
->getValue() ||
1491 Tmp2
!= ST
->getBasePtr())
1492 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1497 switch (TLI
.getTruncStoreAction(ST
->getValue().getValueType(), StVT
)) {
1498 default: assert(0 && "This action is not supported yet!");
1499 case TargetLowering::Legal
:
1500 // If this is an unaligned store and the target doesn't support it,
1502 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1503 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1504 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1505 if (ST
->getAlignment() < ABIAlignment
)
1506 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1510 case TargetLowering::Custom
:
1511 Result
= TLI
.LowerOperation(Result
, DAG
);
1514 // TRUNCSTORE:i16 i32 -> STORE i16
1515 assert(isTypeLegal(StVT
) && "Do not know how to expand this store!");
1516 Tmp3
= DAG
.getNode(ISD::TRUNCATE
, dl
, StVT
, Tmp3
);
1517 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
1518 isVolatile
, isNonTemporal
, Alignment
);
1526 assert(Result
.getValueType() == Op
.getValueType() &&
1527 "Bad legalization!");
1529 // Make sure that the generated code is itself legal.
1531 Result
= LegalizeOp(Result
);
1533 // Note that LegalizeOp may be reentered even from single-use nodes, which
1534 // means that we always must cache transformed nodes.
1535 AddLegalizedOperand(Op
, Result
);
/// ExpandExtractFromVectorThroughStack - Expand an extract-from-vector by
/// spilling the whole source vector to a stack temporary and then loading
/// just the requested piece back (a full load for a vector result, an
/// any-extending scalar load otherwise).
/// NOTE(review): mangled extraction -- the embedded numbers are stale
/// source-line markers; gaps (e.g. 1549) indicate dropped lines. Verify
/// against upstream LegalizeDAG.cpp.
1539 SDValue
SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op
) {
1540 SDValue Vec
= Op
.getOperand(0);
1541 SDValue Idx
= Op
.getOperand(1);
1542 DebugLoc dl
= Op
.getDebugLoc();
1543 // Store the value to a temporary stack slot, then LOAD the returned part.
1544 SDValue StackPtr
= DAG
.CreateStackTemporary(Vec
.getValueType());
1545 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Vec
, StackPtr
,
1546 MachinePointerInfo(), false, false, 0);
1548 // Add the offset to the index.
// NOTE(review): the declaration of EltSize (original line 1549) appears to
// have been dropped by extraction here; it is the element size in bytes.
1550 Vec
.getValueType().getVectorElementType().getSizeInBits()/8;
// Scale the element index into a byte offset.
1551 Idx
= DAG
.getNode(ISD::MUL
, dl
, Idx
.getValueType(), Idx
,
1552 DAG
.getConstant(EltSize
, Idx
.getValueType()));
// Normalize the index to pointer width before the address arithmetic.
1554 if (Idx
.getValueType().bitsGT(TLI
.getPointerTy()))
1555 Idx
= DAG
.getNode(ISD::TRUNCATE
, dl
, TLI
.getPointerTy(), Idx
);
1557 Idx
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, TLI
.getPointerTy(), Idx
);
1559 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, Idx
.getValueType(), Idx
, StackPtr
);
// Vector result: reload directly; scalar result: extending load of the
// element type.
1561 if (Op
.getValueType().isVector())
1562 return DAG
.getLoad(Op
.getValueType(), dl
, Ch
, StackPtr
,MachinePointerInfo(),
1564 return DAG
.getExtLoad(ISD::EXTLOAD
, Op
.getValueType(), dl
, Ch
, StackPtr
,
1565 MachinePointerInfo(),
1566 Vec
.getValueType().getVectorElementType(),
/// ExpandVectorBuildThroughStack - Lower a BUILD_VECTOR-style node by storing
/// each non-undef operand into an aligned stack slot at its element offset,
/// token-factoring the stores together, and reloading the slot as one vector.
/// NOTE(review): mangled extraction -- embedded numbers are stale source-line
/// markers; gaps indicate dropped lines. Verify against upstream.
1570 SDValue
SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode
* Node
) {
1571 // We can't handle this case efficiently. Allocate a sufficiently
1572 // aligned object on the stack, store each element into it, then load
1573 // the result as a vector.
1574 // Create the stack frame object.
1575 EVT VT
= Node
->getValueType(0);
1576 EVT EltVT
= VT
.getVectorElementType();
1577 DebugLoc dl
= Node
->getDebugLoc();
1578 SDValue FIPtr
= DAG
.CreateStackTemporary(VT
);
1579 int FI
= cast
<FrameIndexSDNode
>(FIPtr
.getNode())->getIndex();
1580 MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(FI
);
1582 // Emit a store of each element to the stack slot.
1583 SmallVector
<SDValue
, 8> Stores
;
1584 unsigned TypeByteSize
= EltVT
.getSizeInBits() / 8;
1585 // Store (in the right endianness) the elements to memory.
1586 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1587 // Ignore undef elements.
1588 if (Node
->getOperand(i
).getOpcode() == ISD::UNDEF
) continue;
// Byte offset of element i within the stack slot.
1590 unsigned Offset
= TypeByteSize
*i
;
1592 SDValue Idx
= DAG
.getConstant(Offset
, FIPtr
.getValueType());
1593 Idx
= DAG
.getNode(ISD::ADD
, dl
, FIPtr
.getValueType(), FIPtr
, Idx
);
1595 // If the destination vector element type is narrower than the source
1596 // element type, only store the bits necessary.
1597 if (EltVT
.bitsLT(Node
->getOperand(i
).getValueType().getScalarType())) {
1598 Stores
.push_back(DAG
.getTruncStore(DAG
.getEntryNode(), dl
,
1599 Node
->getOperand(i
), Idx
,
1600 PtrInfo
.getWithOffset(Offset
),
1601 EltVT
, false, false, 0));
1603 Stores
.push_back(DAG
.getStore(DAG
.getEntryNode(), dl
,
1604 Node
->getOperand(i
), Idx
,
1605 PtrInfo
.getWithOffset(Offset
),
// Tie all element stores together so the reload depends on every one;
// with no stores at all, the load can hang directly off the entry node.
1610 if (!Stores
.empty()) // Not all undef elements?
1611 StoreChain
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1612 &Stores
[0], Stores
.size());
1614 StoreChain
= DAG
.getEntryNode();
1616 // Result is a load from the stack slot.
1617 return DAG
.getLoad(VT
, dl
, StoreChain
, FIPtr
, PtrInfo
, false, false, 0);
/// ExpandFCOPYSIGN - Expand FCOPYSIGN(Tmp1, Tmp2): obtain Tmp2's sign bit
/// (via an integer bitcast when the same-width integer type is legal,
/// otherwise via a stack store/load of the float), turn it into a boolean
/// with a setcc-against-zero, and select FABS(Tmp1) or FNEG(FABS(Tmp1)).
/// NOTE(review): mangled extraction -- embedded numbers are stale source-line
/// markers; gaps (e.g. 1627, 1639) indicate dropped lines, including the
/// declarations of SignBit and Ch. Verify against upstream LegalizeDAG.cpp.
1620 SDValue
SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode
* Node
) {
1621 DebugLoc dl
= Node
->getDebugLoc();
1622 SDValue Tmp1
= Node
->getOperand(0);
1623 SDValue Tmp2
= Node
->getOperand(1);
1625 // Get the sign bit of the RHS. First obtain a value that has the same
1626 // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
1628 EVT FloatVT
= Tmp2
.getValueType();
1629 EVT IVT
= EVT::getIntegerVT(*DAG
.getContext(), FloatVT
.getSizeInBits());
1630 if (isTypeLegal(IVT
)) {
1631 // Convert to an integer with the same sign bit.
1632 SignBit
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, IVT
, Tmp2
);
1634 // Store the float to memory, then load the sign part out as an integer.
1635 MVT LoadTy
= TLI
.getPointerTy();
1636 // First create a temporary that is aligned for both the load and store.
1637 SDValue StackPtr
= DAG
.CreateStackTemporary(FloatVT
, LoadTy
);
1638 // Then store the float to it.
1640 DAG
.getStore(DAG
.getEntryNode(), dl
, Tmp2
, StackPtr
, MachinePointerInfo(),
1642 if (TLI
.isBigEndian()) {
1643 assert(FloatVT
.isByteSized() && "Unsupported floating point type!");
1644 // Load out a legal integer with the same sign bit as the float.
1645 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, StackPtr
, MachinePointerInfo(),
1647 } else { // Little endian
1648 SDValue LoadPtr
= StackPtr
;
1649 // The float may be wider than the integer we are going to load. Advance
1650 // the pointer so that the loaded integer will contain the sign bit.
1651 unsigned Strides
= (FloatVT
.getSizeInBits()-1)/LoadTy
.getSizeInBits();
1652 unsigned ByteOffset
= (Strides
* LoadTy
.getSizeInBits()) / 8;
1653 LoadPtr
= DAG
.getNode(ISD::ADD
, dl
, LoadPtr
.getValueType(),
1654 LoadPtr
, DAG
.getIntPtrConstant(ByteOffset
));
1655 // Load a legal integer containing the sign bit.
1656 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, LoadPtr
, MachinePointerInfo(),
1658 // Move the sign bit to the top bit of the loaded integer.
1659 unsigned BitShift
= LoadTy
.getSizeInBits() -
1660 (FloatVT
.getSizeInBits() - 8 * ByteOffset
);
1661 assert(BitShift
< LoadTy
.getSizeInBits() && "Pointer advanced wrong?");
1663 SignBit
= DAG
.getNode(ISD::SHL
, dl
, LoadTy
, SignBit
,
1664 DAG
.getConstant(BitShift
,TLI
.getShiftAmountTy()));
1667 // Now get the sign bit proper, by seeing whether the value is negative.
1668 SignBit
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(SignBit
.getValueType()),
1669 SignBit
, DAG
.getConstant(0, SignBit
.getValueType()),
1671 // Get the absolute value of the result.
1672 SDValue AbsVal
= DAG
.getNode(ISD::FABS
, dl
, Tmp1
.getValueType(), Tmp1
);
1673 // Select between the nabs and abs value based on the sign bit of
1675 return DAG
.getNode(ISD::SELECT
, dl
, AbsVal
.getValueType(), SignBit
,
1676 DAG
.getNode(ISD::FNEG
, dl
, AbsVal
.getValueType(), AbsVal
),
1680 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode
* Node
,
1681 SmallVectorImpl
<SDValue
> &Results
) {
1682 unsigned SPReg
= TLI
.getStackPointerRegisterToSaveRestore();
1683 assert(SPReg
&& "Target cannot require DYNAMIC_STACKALLOC expansion and"
1684 " not tell us which reg is the stack pointer!");
1685 DebugLoc dl
= Node
->getDebugLoc();
1686 EVT VT
= Node
->getValueType(0);
1687 SDValue Tmp1
= SDValue(Node
, 0);
1688 SDValue Tmp2
= SDValue(Node
, 1);
1689 SDValue Tmp3
= Node
->getOperand(2);
1690 SDValue Chain
= Tmp1
.getOperand(0);
1692 // Chain the dynamic stack allocation so that it doesn't modify the stack
1693 // pointer when other instructions are using the stack.
1694 Chain
= DAG
.getCALLSEQ_START(Chain
, DAG
.getIntPtrConstant(0, true));
1696 SDValue Size
= Tmp2
.getOperand(1);
1697 SDValue SP
= DAG
.getCopyFromReg(Chain
, dl
, SPReg
, VT
);
1698 Chain
= SP
.getValue(1);
1699 unsigned Align
= cast
<ConstantSDNode
>(Tmp3
)->getZExtValue();
1700 unsigned StackAlign
= TM
.getFrameInfo()->getStackAlignment();
1701 if (Align
> StackAlign
)
1702 SP
= DAG
.getNode(ISD::AND
, dl
, VT
, SP
,
1703 DAG
.getConstant(-(uint64_t)Align
, VT
));
1704 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, VT
, SP
, Size
); // Value
1705 Chain
= DAG
.getCopyToReg(Chain
, dl
, SPReg
, Tmp1
); // Output chain
1707 Tmp2
= DAG
.getCALLSEQ_END(Chain
, DAG
.getIntPtrConstant(0, true),
1708 DAG
.getIntPtrConstant(0, true), SDValue());
1710 Results
.push_back(Tmp1
);
1711 Results
.push_back(Tmp2
);
1714 /// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
1715 /// condition code CC on the current target. This routine expands SETCC with
1716 /// illegal condition code into AND / OR of multiple SETCC values.
1717 void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT
,
1718 SDValue
&LHS
, SDValue
&RHS
,
1721 EVT OpVT
= LHS
.getValueType();
1722 ISD::CondCode CCCode
= cast
<CondCodeSDNode
>(CC
)->get();
1723 switch (TLI
.getCondCodeAction(CCCode
, OpVT
)) {
1724 default: assert(0 && "Unknown condition code action!");
1725 case TargetLowering::Legal
:
1728 case TargetLowering::Expand
: {
1729 ISD::CondCode CC1
= ISD::SETCC_INVALID
, CC2
= ISD::SETCC_INVALID
;
1732 default: assert(0 && "Don't know how to expand this condition!");
1733 case ISD::SETOEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1734 case ISD::SETOGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1735 case ISD::SETOGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1736 case ISD::SETOLT
: CC1
= ISD::SETLT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1737 case ISD::SETOLE
: CC1
= ISD::SETLE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1738 case ISD::SETONE
: CC1
= ISD::SETNE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1739 case ISD::SETUEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1740 case ISD::SETUGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1741 case ISD::SETUGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1742 case ISD::SETULT
: CC1
= ISD::SETLT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1743 case ISD::SETULE
: CC1
= ISD::SETLE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1744 case ISD::SETUNE
: CC1
= ISD::SETNE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1745 // FIXME: Implement more expansions.
1748 SDValue SetCC1
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC1
);
1749 SDValue SetCC2
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC2
);
1750 LHS
= DAG
.getNode(Opc
, dl
, VT
, SetCC1
, SetCC2
);
1758 /// EmitStackConvert - Emit a store/load combination to the stack. This stores
1759 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1760 /// a load from the stack slot to DestVT, extending it if needed.
1761 /// The resultant code need not be legal.
1762 SDValue
SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp
,
1766 // Create the stack frame object.
1768 TLI
.getTargetData()->getPrefTypeAlignment(SrcOp
.getValueType().
1769 getTypeForEVT(*DAG
.getContext()));
1770 SDValue FIPtr
= DAG
.CreateStackTemporary(SlotVT
, SrcAlign
);
1772 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(FIPtr
);
1773 int SPFI
= StackPtrFI
->getIndex();
1774 MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(SPFI
);
1776 unsigned SrcSize
= SrcOp
.getValueType().getSizeInBits();
1777 unsigned SlotSize
= SlotVT
.getSizeInBits();
1778 unsigned DestSize
= DestVT
.getSizeInBits();
1779 const Type
*DestType
= DestVT
.getTypeForEVT(*DAG
.getContext());
1780 unsigned DestAlign
= TLI
.getTargetData()->getPrefTypeAlignment(DestType
);
1782 // Emit a store to the stack slot. Use a truncstore if the input value is
1783 // later than DestVT.
1786 if (SrcSize
> SlotSize
)
1787 Store
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
1788 PtrInfo
, SlotVT
, false, false, SrcAlign
);
1790 assert(SrcSize
== SlotSize
&& "Invalid store");
1791 Store
= DAG
.getStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
1792 PtrInfo
, false, false, SrcAlign
);
1795 // Result is a load from the stack slot.
1796 if (SlotSize
== DestSize
)
1797 return DAG
.getLoad(DestVT
, dl
, Store
, FIPtr
, PtrInfo
,
1798 false, false, DestAlign
);
1800 assert(SlotSize
< DestSize
&& "Unknown extension!");
1801 return DAG
.getExtLoad(ISD::EXTLOAD
, DestVT
, dl
, Store
, FIPtr
,
1802 PtrInfo
, SlotVT
, false, false, DestAlign
);
1805 SDValue
SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode
*Node
) {
1806 DebugLoc dl
= Node
->getDebugLoc();
1807 // Create a vector sized/aligned stack slot, store the value to element #0,
1808 // then load the whole vector back out.
1809 SDValue StackPtr
= DAG
.CreateStackTemporary(Node
->getValueType(0));
1811 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(StackPtr
);
1812 int SPFI
= StackPtrFI
->getIndex();
1814 SDValue Ch
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, Node
->getOperand(0),
1816 MachinePointerInfo::getFixedStack(SPFI
),
1817 Node
->getValueType(0).getVectorElementType(),
1819 return DAG
.getLoad(Node
->getValueType(0), dl
, Ch
, StackPtr
,
1820 MachinePointerInfo::getFixedStack(SPFI
),
1825 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
1826 /// support the operation, but do support the resultant vector type.
1827 SDValue
SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode
*Node
) {
1828 unsigned NumElems
= Node
->getNumOperands();
1829 SDValue Value1
, Value2
;
1830 DebugLoc dl
= Node
->getDebugLoc();
1831 EVT VT
= Node
->getValueType(0);
1832 EVT OpVT
= Node
->getOperand(0).getValueType();
1833 EVT EltVT
= VT
.getVectorElementType();
1835 // If the only non-undef value is the low element, turn this into a
1836 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
1837 bool isOnlyLowElement
= true;
1838 bool MoreThanTwoValues
= false;
1839 bool isConstant
= true;
1840 for (unsigned i
= 0; i
< NumElems
; ++i
) {
1841 SDValue V
= Node
->getOperand(i
);
1842 if (V
.getOpcode() == ISD::UNDEF
)
1845 isOnlyLowElement
= false;
1846 if (!isa
<ConstantFPSDNode
>(V
) && !isa
<ConstantSDNode
>(V
))
1849 if (!Value1
.getNode()) {
1851 } else if (!Value2
.getNode()) {
1854 } else if (V
!= Value1
&& V
!= Value2
) {
1855 MoreThanTwoValues
= true;
1859 if (!Value1
.getNode())
1860 return DAG
.getUNDEF(VT
);
1862 if (isOnlyLowElement
)
1863 return DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Node
->getOperand(0));
1865 // If all elements are constants, create a load from the constant pool.
1867 std::vector
<Constant
*> CV
;
1868 for (unsigned i
= 0, e
= NumElems
; i
!= e
; ++i
) {
1869 if (ConstantFPSDNode
*V
=
1870 dyn_cast
<ConstantFPSDNode
>(Node
->getOperand(i
))) {
1871 CV
.push_back(const_cast<ConstantFP
*>(V
->getConstantFPValue()));
1872 } else if (ConstantSDNode
*V
=
1873 dyn_cast
<ConstantSDNode
>(Node
->getOperand(i
))) {
1875 CV
.push_back(const_cast<ConstantInt
*>(V
->getConstantIntValue()));
1877 // If OpVT and EltVT don't match, EltVT is not legal and the
1878 // element values have been promoted/truncated earlier. Undo this;
1879 // we don't want a v16i8 to become a v16i32 for example.
1880 const ConstantInt
*CI
= V
->getConstantIntValue();
1881 CV
.push_back(ConstantInt::get(EltVT
.getTypeForEVT(*DAG
.getContext()),
1882 CI
->getZExtValue()));
1885 assert(Node
->getOperand(i
).getOpcode() == ISD::UNDEF
);
1886 const Type
*OpNTy
= EltVT
.getTypeForEVT(*DAG
.getContext());
1887 CV
.push_back(UndefValue::get(OpNTy
));
1890 Constant
*CP
= ConstantVector::get(CV
);
1891 SDValue CPIdx
= DAG
.getConstantPool(CP
, TLI
.getPointerTy());
1892 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
1893 return DAG
.getLoad(VT
, dl
, DAG
.getEntryNode(), CPIdx
,
1894 MachinePointerInfo::getConstantPool(),
1895 false, false, Alignment
);
1898 if (!MoreThanTwoValues
) {
1899 SmallVector
<int, 8> ShuffleVec(NumElems
, -1);
1900 for (unsigned i
= 0; i
< NumElems
; ++i
) {
1901 SDValue V
= Node
->getOperand(i
);
1902 if (V
.getOpcode() == ISD::UNDEF
)
1904 ShuffleVec
[i
] = V
== Value1
? 0 : NumElems
;
1906 if (TLI
.isShuffleMaskLegal(ShuffleVec
, Node
->getValueType(0))) {
1907 // Get the splatted value into the low element of a vector register.
1908 SDValue Vec1
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value1
);
1910 if (Value2
.getNode())
1911 Vec2
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value2
);
1913 Vec2
= DAG
.getUNDEF(VT
);
1915 // Return shuffle(LowValVec, undef, <0,0,0,0>)
1916 return DAG
.getVectorShuffle(VT
, dl
, Vec1
, Vec2
, ShuffleVec
.data());
1920 // Otherwise, we can't handle this case efficiently.
1921 return ExpandVectorBuildThroughStack(Node
);
1924 // ExpandLibCall - Expand a node into a call to a libcall. If the result value
1925 // does not fit into a register, return the lo part and set the hi part to the
1926 // by-reg argument. If it does fit into a single register, return the result
1927 // and leave the Hi part unset.
1928 SDValue
SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC
, SDNode
*Node
,
1930 assert(!IsLegalizingCall
&& "Cannot overlap legalization of calls!");
1931 // The input chain to this libcall is the entry node of the function.
1932 // Legalizing the call will automatically add the previous call to the
1934 SDValue InChain
= DAG
.getEntryNode();
1936 TargetLowering::ArgListTy Args
;
1937 TargetLowering::ArgListEntry Entry
;
1938 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1939 EVT ArgVT
= Node
->getOperand(i
).getValueType();
1940 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
1941 Entry
.Node
= Node
->getOperand(i
); Entry
.Ty
= ArgTy
;
1942 Entry
.isSExt
= isSigned
;
1943 Entry
.isZExt
= !isSigned
;
1944 Args
.push_back(Entry
);
1946 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
1947 TLI
.getPointerTy());
1949 // Splice the libcall in wherever FindInputOutputChains tells us to.
1950 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
1951 std::pair
<SDValue
, SDValue
> CallInfo
=
1952 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
1953 0, TLI
.getLibcallCallingConv(LC
), false,
1954 /*isReturnValueUsed=*/true,
1955 Callee
, Args
, DAG
, Node
->getDebugLoc());
1957 // Legalize the call sequence, starting with the chain. This will advance
1958 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
1959 // was added by LowerCallTo (guaranteeing proper serialization of calls).
1960 LegalizeOp(CallInfo
.second
);
1961 return CallInfo
.first
;
1964 // ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
1965 // ExpandLibCall except that the first operand is the in-chain.
1966 std::pair
<SDValue
, SDValue
>
1967 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC
,
1970 assert(!IsLegalizingCall
&& "Cannot overlap legalization of calls!");
1971 SDValue InChain
= Node
->getOperand(0);
1973 TargetLowering::ArgListTy Args
;
1974 TargetLowering::ArgListEntry Entry
;
1975 for (unsigned i
= 1, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1976 EVT ArgVT
= Node
->getOperand(i
).getValueType();
1977 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
1978 Entry
.Node
= Node
->getOperand(i
);
1980 Entry
.isSExt
= isSigned
;
1981 Entry
.isZExt
= !isSigned
;
1982 Args
.push_back(Entry
);
1984 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
1985 TLI
.getPointerTy());
1987 // Splice the libcall in wherever FindInputOutputChains tells us to.
1988 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
1989 std::pair
<SDValue
, SDValue
> CallInfo
=
1990 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
1991 0, TLI
.getLibcallCallingConv(LC
), false,
1992 /*isReturnValueUsed=*/true,
1993 Callee
, Args
, DAG
, Node
->getDebugLoc());
1995 // Legalize the call sequence, starting with the chain. This will advance
1996 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
1997 // was added by LowerCallTo (guaranteeing proper serialization of calls).
1998 LegalizeOp(CallInfo
.second
);
2002 SDValue
SelectionDAGLegalize::ExpandFPLibCall(SDNode
* Node
,
2003 RTLIB::Libcall Call_F32
,
2004 RTLIB::Libcall Call_F64
,
2005 RTLIB::Libcall Call_F80
,
2006 RTLIB::Libcall Call_PPCF128
) {
2008 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2009 default: assert(0 && "Unexpected request for libcall!");
2010 case MVT::f32
: LC
= Call_F32
; break;
2011 case MVT::f64
: LC
= Call_F64
; break;
2012 case MVT::f80
: LC
= Call_F80
; break;
2013 case MVT::ppcf128
: LC
= Call_PPCF128
; break;
2015 return ExpandLibCall(LC
, Node
, false);
2018 SDValue
SelectionDAGLegalize::ExpandIntLibCall(SDNode
* Node
, bool isSigned
,
2019 RTLIB::Libcall Call_I8
,
2020 RTLIB::Libcall Call_I16
,
2021 RTLIB::Libcall Call_I32
,
2022 RTLIB::Libcall Call_I64
,
2023 RTLIB::Libcall Call_I128
) {
2025 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2026 default: assert(0 && "Unexpected request for libcall!");
2027 case MVT::i8
: LC
= Call_I8
; break;
2028 case MVT::i16
: LC
= Call_I16
; break;
2029 case MVT::i32
: LC
= Call_I32
; break;
2030 case MVT::i64
: LC
= Call_I64
; break;
2031 case MVT::i128
: LC
= Call_I128
; break;
2033 return ExpandLibCall(LC
, Node
, isSigned
);
2036 /// ExpandLegalINT_TO_FP - This function is responsible for legalizing a
2037 /// INT_TO_FP operation of the specified operand when the target requests that
2038 /// we expand it. At this point, we know that the result and operand types are
2039 /// legal for the target.
2040 SDValue
SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned
,
2044 if (Op0
.getValueType() == MVT::i32
) {
2045 // simple 32-bit [signed|unsigned] integer to float/double expansion
2047 // Get the stack frame index of a 8 byte buffer.
2048 SDValue StackSlot
= DAG
.CreateStackTemporary(MVT::f64
);
2050 // word offset constant for Hi/Lo address computation
2051 SDValue WordOff
= DAG
.getConstant(sizeof(int), TLI
.getPointerTy());
2052 // set up Hi and Lo (into buffer) address based on endian
2053 SDValue Hi
= StackSlot
;
2054 SDValue Lo
= DAG
.getNode(ISD::ADD
, dl
,
2055 TLI
.getPointerTy(), StackSlot
, WordOff
);
2056 if (TLI
.isLittleEndian())
2059 // if signed map to unsigned space
2062 // constant used to invert sign bit (signed to unsigned mapping)
2063 SDValue SignBit
= DAG
.getConstant(0x80000000u
, MVT::i32
);
2064 Op0Mapped
= DAG
.getNode(ISD::XOR
, dl
, MVT::i32
, Op0
, SignBit
);
2068 // store the lo of the constructed double - based on integer input
2069 SDValue Store1
= DAG
.getStore(DAG
.getEntryNode(), dl
,
2070 Op0Mapped
, Lo
, MachinePointerInfo(),
2072 // initial hi portion of constructed double
2073 SDValue InitialHi
= DAG
.getConstant(0x43300000u
, MVT::i32
);
2074 // store the hi of the constructed double - biased exponent
2075 SDValue Store2
= DAG
.getStore(Store1
, dl
, InitialHi
, Hi
,
2076 MachinePointerInfo(),
2078 // load the constructed double
2079 SDValue Load
= DAG
.getLoad(MVT::f64
, dl
, Store2
, StackSlot
,
2080 MachinePointerInfo(), false, false, 0);
2081 // FP constant to bias correct the final result
2082 SDValue Bias
= DAG
.getConstantFP(isSigned
?
2083 BitsToDouble(0x4330000080000000ULL
) :
2084 BitsToDouble(0x4330000000000000ULL
),
2086 // subtract the bias
2087 SDValue Sub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, Load
, Bias
);
2090 // handle final rounding
2091 if (DestVT
== MVT::f64
) {
2094 } else if (DestVT
.bitsLT(MVT::f64
)) {
2095 Result
= DAG
.getNode(ISD::FP_ROUND
, dl
, DestVT
, Sub
,
2096 DAG
.getIntPtrConstant(0));
2097 } else if (DestVT
.bitsGT(MVT::f64
)) {
2098 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
, DestVT
, Sub
);
2102 assert(!isSigned
&& "Legalize cannot Expand SINT_TO_FP for i64 yet");
2103 // Code below here assumes !isSigned without checking again.
2105 // Implementation of unsigned i64 to f64 following the algorithm in
2106 // __floatundidf in compiler_rt. This implementation has the advantage
2107 // of performing rounding correctly, both in the default rounding mode
2108 // and in all alternate rounding modes.
2109 // TODO: Generalize this for use with other types.
2110 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f64
) {
2112 DAG
.getConstant(UINT64_C(0x4330000000000000), MVT::i64
);
2113 SDValue TwoP84PlusTwoP52
=
2114 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64
);
2116 DAG
.getConstant(UINT64_C(0x4530000000000000), MVT::i64
);
2118 SDValue Lo
= DAG
.getZeroExtendInReg(Op0
, dl
, MVT::i32
);
2119 SDValue Hi
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Op0
,
2120 DAG
.getConstant(32, MVT::i64
));
2121 SDValue LoOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Lo
, TwoP52
);
2122 SDValue HiOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Hi
, TwoP84
);
2123 SDValue LoFlt
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, MVT::f64
, LoOr
);
2124 SDValue HiFlt
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, MVT::f64
, HiOr
);
2125 SDValue HiSub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, HiFlt
,
2127 return DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, LoFlt
, HiSub
);
2130 // Implementation of unsigned i64 to f32.
2131 // TODO: Generalize this for use with other types.
2132 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f32
) {
2133 // For unsigned conversions, convert them to signed conversions using the
2134 // algorithm from the x86_64 __floatundidf in compiler_rt.
2136 SDValue Fast
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::f32
, Op0
);
2138 SDValue ShiftConst
= DAG
.getConstant(1, TLI
.getShiftAmountTy());
2139 SDValue Shr
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Op0
, ShiftConst
);
2140 SDValue AndConst
= DAG
.getConstant(1, MVT::i64
);
2141 SDValue And
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
, AndConst
);
2142 SDValue Or
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, And
, Shr
);
2144 SDValue SignCvt
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::f32
, Or
);
2145 SDValue Slow
= DAG
.getNode(ISD::FADD
, dl
, MVT::f32
, SignCvt
, SignCvt
);
2147 // TODO: This really should be implemented using a branch rather than a
2148 // select. We happen to get lucky and machinesink does the right
2149 // thing most of the time. This would be a good candidate for a
2150 //pseudo-op, or, even better, for whole-function isel.
2151 SDValue SignBitTest
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2152 Op0
, DAG
.getConstant(0, MVT::i64
), ISD::SETLT
);
2153 return DAG
.getNode(ISD::SELECT
, dl
, MVT::f32
, SignBitTest
, Slow
, Fast
);
2156 // Otherwise, implement the fully general conversion.
2157 EVT SHVT
= TLI
.getShiftAmountTy();
2159 SDValue And
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2160 DAG
.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64
));
2161 SDValue Or
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, And
,
2162 DAG
.getConstant(UINT64_C(0x800), MVT::i64
));
2163 SDValue And2
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2164 DAG
.getConstant(UINT64_C(0x7ff), MVT::i64
));
2165 SDValue Ne
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2166 And2
, DAG
.getConstant(UINT64_C(0), MVT::i64
), ISD::SETNE
);
2167 SDValue Sel
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ne
, Or
, Op0
);
2168 SDValue Ge
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2169 Op0
, DAG
.getConstant(UINT64_C(0x0020000000000000), MVT::i64
),
2171 SDValue Sel2
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ge
, Sel
, Op0
);
2173 SDValue Sh
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Sel2
,
2174 DAG
.getConstant(32, SHVT
));
2175 SDValue Trunc
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sh
);
2176 SDValue Fcvt
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Trunc
);
2178 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64
);
2179 SDValue Fmul
= DAG
.getNode(ISD::FMUL
, dl
, MVT::f64
, TwoP32
, Fcvt
);
2180 SDValue Lo
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sel2
);
2181 SDValue Fcvt2
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Lo
);
2182 SDValue Fadd
= DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, Fmul
, Fcvt2
);
2183 return DAG
.getNode(ISD::FP_ROUND
, dl
, MVT::f32
, Fadd
,
2184 DAG
.getIntPtrConstant(0));
2187 SDValue Tmp1
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, DestVT
, Op0
);
2189 SDValue SignSet
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(Op0
.getValueType()),
2190 Op0
, DAG
.getConstant(0, Op0
.getValueType()),
2192 SDValue Zero
= DAG
.getIntPtrConstant(0), Four
= DAG
.getIntPtrConstant(4);
2193 SDValue CstOffset
= DAG
.getNode(ISD::SELECT
, dl
, Zero
.getValueType(),
2194 SignSet
, Four
, Zero
);
2196 // If the sign bit of the integer is set, the large number will be treated
2197 // as a negative number. To counteract this, the dynamic code adds an
2198 // offset depending on the data type.
2200 switch (Op0
.getValueType().getSimpleVT().SimpleTy
) {
2201 default: assert(0 && "Unsupported integer type!");
2202 case MVT::i8
: FF
= 0x43800000ULL
; break; // 2^8 (as a float)
2203 case MVT::i16
: FF
= 0x47800000ULL
; break; // 2^16 (as a float)
2204 case MVT::i32
: FF
= 0x4F800000ULL
; break; // 2^32 (as a float)
2205 case MVT::i64
: FF
= 0x5F800000ULL
; break; // 2^64 (as a float)
2207 if (TLI
.isLittleEndian()) FF
<<= 32;
2208 Constant
*FudgeFactor
= ConstantInt::get(
2209 Type::getInt64Ty(*DAG
.getContext()), FF
);
2211 SDValue CPIdx
= DAG
.getConstantPool(FudgeFactor
, TLI
.getPointerTy());
2212 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
2213 CPIdx
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), CPIdx
, CstOffset
);
2214 Alignment
= std::min(Alignment
, 4u);
2216 if (DestVT
== MVT::f32
)
2217 FudgeInReg
= DAG
.getLoad(MVT::f32
, dl
, DAG
.getEntryNode(), CPIdx
,
2218 MachinePointerInfo::getConstantPool(),
2219 false, false, Alignment
);
2222 LegalizeOp(DAG
.getExtLoad(ISD::EXTLOAD
, DestVT
, dl
,
2223 DAG
.getEntryNode(), CPIdx
,
2224 MachinePointerInfo::getConstantPool(),
2225 MVT::f32
, false, false, Alignment
));
2228 return DAG
.getNode(ISD::FADD
, dl
, DestVT
, Tmp1
, FudgeInReg
);
2231 /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
2232 /// *INT_TO_FP operation of the specified operand when the target requests that
2233 /// we promote it. At this point, we know that the result and operand types are
2234 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2235 /// operation that takes a larger input.
2236 SDValue
SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp
,
2240 // First step, figure out the appropriate *INT_TO_FP operation to use.
2241 EVT NewInTy
= LegalOp
.getValueType();
2243 unsigned OpToUse
= 0;
2245 // Scan for the appropriate larger type to use.
2247 NewInTy
= (MVT::SimpleValueType
)(NewInTy
.getSimpleVT().SimpleTy
+1);
2248 assert(NewInTy
.isInteger() && "Ran out of possibilities!");
2250 // If the target supports SINT_TO_FP of this type, use it.
2251 if (TLI
.isOperationLegalOrCustom(ISD::SINT_TO_FP
, NewInTy
)) {
2252 OpToUse
= ISD::SINT_TO_FP
;
2255 if (isSigned
) continue;
2257 // If the target supports UINT_TO_FP of this type, use it.
2258 if (TLI
.isOperationLegalOrCustom(ISD::UINT_TO_FP
, NewInTy
)) {
2259 OpToUse
= ISD::UINT_TO_FP
;
2263 // Otherwise, try a larger type.
2266 // Okay, we found the operation and type to use. Zero extend our input to the
2267 // desired type then run the operation on it.
2268 return DAG
.getNode(OpToUse
, dl
, DestVT
,
2269 DAG
.getNode(isSigned
? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
,
2270 dl
, NewInTy
, LegalOp
));
2273 /// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
2274 /// FP_TO_*INT operation of the specified operand when the target requests that
2275 /// we promote it. At this point, we know that the result and operand types are
2276 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2277 /// operation that returns a larger result.
2278 SDValue
SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp
,
2282 // First step, figure out the appropriate FP_TO*INT operation to use.
2283 EVT NewOutTy
= DestVT
;
2285 unsigned OpToUse
= 0;
2287 // Scan for the appropriate larger type to use.
2289 NewOutTy
= (MVT::SimpleValueType
)(NewOutTy
.getSimpleVT().SimpleTy
+1);
2290 assert(NewOutTy
.isInteger() && "Ran out of possibilities!");
2292 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_SINT
, NewOutTy
)) {
2293 OpToUse
= ISD::FP_TO_SINT
;
2297 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_UINT
, NewOutTy
)) {
2298 OpToUse
= ISD::FP_TO_UINT
;
2302 // Otherwise, try a larger type.
2306 // Okay, we found the operation and type to use.
2307 SDValue Operation
= DAG
.getNode(OpToUse
, dl
, NewOutTy
, LegalOp
);
2309 // Truncate the result of the extended FP_TO_*INT operation to the desired
2311 return DAG
.getNode(ISD::TRUNCATE
, dl
, DestVT
, Operation
);
2314 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
2316 SDValue
SelectionDAGLegalize::ExpandBSWAP(SDValue Op
, DebugLoc dl
) {
2317 EVT VT
= Op
.getValueType();
2318 EVT SHVT
= TLI
.getShiftAmountTy();
2319 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
, Tmp5
, Tmp6
, Tmp7
, Tmp8
;
2320 switch (VT
.getSimpleVT().SimpleTy
) {
2321 default: assert(0 && "Unhandled Expand type in BSWAP!");
2323 Tmp2
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2324 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2325 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp1
, Tmp2
);
2327 Tmp4
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2328 Tmp3
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2329 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2330 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2331 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(0xFF0000, VT
));
2332 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(0xFF00, VT
));
2333 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2334 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2335 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2337 Tmp8
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2338 Tmp7
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2339 Tmp6
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2340 Tmp5
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2341 Tmp4
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2342 Tmp3
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2343 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2344 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2345 Tmp7
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp7
, DAG
.getConstant(255ULL<<48, VT
));
2346 Tmp6
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp6
, DAG
.getConstant(255ULL<<40, VT
));
2347 Tmp5
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp5
, DAG
.getConstant(255ULL<<32, VT
));
2348 Tmp4
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp4
, DAG
.getConstant(255ULL<<24, VT
));
2349 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(255ULL<<16, VT
));
2350 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(255ULL<<8 , VT
));
2351 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp7
);
2352 Tmp6
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp6
, Tmp5
);
2353 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2354 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2355 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp6
);
2356 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2357 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp4
);
2361 /// ExpandBitCount - Expand the specified bitcount instruction into operations.
2363 SDValue
SelectionDAGLegalize::ExpandBitCount(unsigned Opc
, SDValue Op
,
2366 default: assert(0 && "Cannot expand this yet!");
2368 static const uint64_t mask
[6] = {
2369 0x5555555555555555ULL
, 0x3333333333333333ULL
,
2370 0x0F0F0F0F0F0F0F0FULL
, 0x00FF00FF00FF00FFULL
,
2371 0x0000FFFF0000FFFFULL
, 0x00000000FFFFFFFFULL
2373 EVT VT
= Op
.getValueType();
2374 EVT ShVT
= TLI
.getShiftAmountTy();
2375 unsigned len
= VT
.getSizeInBits();
2376 for (unsigned i
= 0; (1U << i
) <= (len
/ 2); ++i
) {
2377 //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
2378 unsigned EltSize
= VT
.isVector() ?
2379 VT
.getVectorElementType().getSizeInBits() : len
;
2380 SDValue Tmp2
= DAG
.getConstant(APInt(EltSize
, mask
[i
]), VT
);
2381 SDValue Tmp3
= DAG
.getConstant(1ULL << i
, ShVT
);
2382 Op
= DAG
.getNode(ISD::ADD
, dl
, VT
,
2383 DAG
.getNode(ISD::AND
, dl
, VT
, Op
, Tmp2
),
2384 DAG
.getNode(ISD::AND
, dl
, VT
,
2385 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, Tmp3
),
2391 // for now, we do this:
2392 // x = x | (x >> 1);
2393 // x = x | (x >> 2);
2395 // x = x | (x >>16);
2396 // x = x | (x >>32); // for 64-bit input
2397 // return popcount(~x);
2399 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
2400 EVT VT
= Op
.getValueType();
2401 EVT ShVT
= TLI
.getShiftAmountTy();
2402 unsigned len
= VT
.getSizeInBits();
2403 for (unsigned i
= 0; (1U << i
) <= (len
/ 2); ++i
) {
2404 SDValue Tmp3
= DAG
.getConstant(1ULL << i
, ShVT
);
2405 Op
= DAG
.getNode(ISD::OR
, dl
, VT
, Op
,
2406 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, Tmp3
));
2408 Op
= DAG
.getNOT(dl
, Op
, VT
);
2409 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Op
);
2412 // for now, we use: { return popcount(~x & (x - 1)); }
2413 // unless the target has ctlz but not ctpop, in which case we use:
2414 // { return 32 - nlz(~x & (x-1)); }
2415 // see also http://www.hackersdelight.org/HDcode/ntz.cc
2416 EVT VT
= Op
.getValueType();
2417 SDValue Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
,
2418 DAG
.getNOT(dl
, Op
, VT
),
2419 DAG
.getNode(ISD::SUB
, dl
, VT
, Op
,
2420 DAG
.getConstant(1, VT
)));
2421 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
2422 if (!TLI
.isOperationLegalOrCustom(ISD::CTPOP
, VT
) &&
2423 TLI
.isOperationLegalOrCustom(ISD::CTLZ
, VT
))
2424 return DAG
.getNode(ISD::SUB
, dl
, VT
,
2425 DAG
.getConstant(VT
.getSizeInBits(), VT
),
2426 DAG
.getNode(ISD::CTLZ
, dl
, VT
, Tmp3
));
2427 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Tmp3
);
2432 std::pair
<SDValue
, SDValue
> SelectionDAGLegalize::ExpandAtomic(SDNode
*Node
) {
2433 unsigned Opc
= Node
->getOpcode();
2434 MVT VT
= cast
<AtomicSDNode
>(Node
)->getMemoryVT().getSimpleVT();
2439 llvm_unreachable("Unhandled atomic intrinsic Expand!");
2441 case ISD::ATOMIC_SWAP
:
2442 switch (VT
.SimpleTy
) {
2443 default: llvm_unreachable("Unexpected value type for atomic!");
2444 case MVT::i8
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_1
; break;
2445 case MVT::i16
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_2
; break;
2446 case MVT::i32
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_4
; break;
2447 case MVT::i64
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_8
; break;
2450 case ISD::ATOMIC_CMP_SWAP
:
2451 switch (VT
.SimpleTy
) {
2452 default: llvm_unreachable("Unexpected value type for atomic!");
2453 case MVT::i8
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1
; break;
2454 case MVT::i16
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2
; break;
2455 case MVT::i32
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4
; break;
2456 case MVT::i64
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8
; break;
2459 case ISD::ATOMIC_LOAD_ADD
:
2460 switch (VT
.SimpleTy
) {
2461 default: llvm_unreachable("Unexpected value type for atomic!");
2462 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_ADD_1
; break;
2463 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_ADD_2
; break;
2464 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_ADD_4
; break;
2465 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_ADD_8
; break;
2468 case ISD::ATOMIC_LOAD_SUB
:
2469 switch (VT
.SimpleTy
) {
2470 default: llvm_unreachable("Unexpected value type for atomic!");
2471 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_SUB_1
; break;
2472 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_SUB_2
; break;
2473 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_SUB_4
; break;
2474 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_SUB_8
; break;
2477 case ISD::ATOMIC_LOAD_AND
:
2478 switch (VT
.SimpleTy
) {
2479 default: llvm_unreachable("Unexpected value type for atomic!");
2480 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_AND_1
; break;
2481 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_AND_2
; break;
2482 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_AND_4
; break;
2483 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_AND_8
; break;
2486 case ISD::ATOMIC_LOAD_OR
:
2487 switch (VT
.SimpleTy
) {
2488 default: llvm_unreachable("Unexpected value type for atomic!");
2489 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_OR_1
; break;
2490 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_OR_2
; break;
2491 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_OR_4
; break;
2492 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_OR_8
; break;
2495 case ISD::ATOMIC_LOAD_XOR
:
2496 switch (VT
.SimpleTy
) {
2497 default: llvm_unreachable("Unexpected value type for atomic!");
2498 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_XOR_1
; break;
2499 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_XOR_2
; break;
2500 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_XOR_4
; break;
2501 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_XOR_8
; break;
2504 case ISD::ATOMIC_LOAD_NAND
:
2505 switch (VT
.SimpleTy
) {
2506 default: llvm_unreachable("Unexpected value type for atomic!");
2507 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_NAND_1
; break;
2508 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_NAND_2
; break;
2509 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_NAND_4
; break;
2510 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_NAND_8
; break;
2515 return ExpandChainLibCall(LC
, Node
, false);
2518 void SelectionDAGLegalize::ExpandNode(SDNode
*Node
,
2519 SmallVectorImpl
<SDValue
> &Results
) {
2520 DebugLoc dl
= Node
->getDebugLoc();
2521 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
;
2522 switch (Node
->getOpcode()) {
2526 Tmp1
= ExpandBitCount(Node
->getOpcode(), Node
->getOperand(0), dl
);
2527 Results
.push_back(Tmp1
);
2530 Results
.push_back(ExpandBSWAP(Node
->getOperand(0), dl
));
2532 case ISD::FRAMEADDR
:
2533 case ISD::RETURNADDR
:
2534 case ISD::FRAME_TO_ARGS_OFFSET
:
2535 Results
.push_back(DAG
.getConstant(0, Node
->getValueType(0)));
2537 case ISD::FLT_ROUNDS_
:
2538 Results
.push_back(DAG
.getConstant(1, Node
->getValueType(0)));
2540 case ISD::EH_RETURN
:
2544 case ISD::EH_SJLJ_LONGJMP
:
2545 case ISD::EH_SJLJ_DISPATCHSETUP
:
2546 // If the target didn't expand these, there's nothing to do, so just
2547 // preserve the chain and be done.
2548 Results
.push_back(Node
->getOperand(0));
2550 case ISD::EH_SJLJ_SETJMP
:
2551 // If the target didn't expand this, just return 'zero' and preserve the
2553 Results
.push_back(DAG
.getConstant(0, MVT::i32
));
2554 Results
.push_back(Node
->getOperand(0));
2556 case ISD::MEMBARRIER
: {
2557 // If the target didn't lower this, lower it to '__sync_synchronize()' call
2558 TargetLowering::ArgListTy Args
;
2559 std::pair
<SDValue
, SDValue
> CallResult
=
2560 TLI
.LowerCallTo(Node
->getOperand(0), Type::getVoidTy(*DAG
.getContext()),
2561 false, false, false, false, 0, CallingConv::C
, false,
2562 /*isReturnValueUsed=*/true,
2563 DAG
.getExternalSymbol("__sync_synchronize",
2564 TLI
.getPointerTy()),
2566 Results
.push_back(CallResult
.second
);
2569 // By default, atomic intrinsics are marked Legal and lowered. Targets
2570 // which don't support them directly, however, may want libcalls, in which
2571 // case they mark them Expand, and we get here.
2572 // FIXME: Unimplemented for now. Add libcalls.
2573 case ISD::ATOMIC_SWAP
:
2574 case ISD::ATOMIC_LOAD_ADD
:
2575 case ISD::ATOMIC_LOAD_SUB
:
2576 case ISD::ATOMIC_LOAD_AND
:
2577 case ISD::ATOMIC_LOAD_OR
:
2578 case ISD::ATOMIC_LOAD_XOR
:
2579 case ISD::ATOMIC_LOAD_NAND
:
2580 case ISD::ATOMIC_LOAD_MIN
:
2581 case ISD::ATOMIC_LOAD_MAX
:
2582 case ISD::ATOMIC_LOAD_UMIN
:
2583 case ISD::ATOMIC_LOAD_UMAX
:
2584 case ISD::ATOMIC_CMP_SWAP
: {
2585 std::pair
<SDValue
, SDValue
> Tmp
= ExpandAtomic(Node
);
2586 Results
.push_back(Tmp
.first
);
2587 Results
.push_back(Tmp
.second
);
2590 case ISD::DYNAMIC_STACKALLOC
:
2591 ExpandDYNAMIC_STACKALLOC(Node
, Results
);
2593 case ISD::MERGE_VALUES
:
2594 for (unsigned i
= 0; i
< Node
->getNumValues(); i
++)
2595 Results
.push_back(Node
->getOperand(i
));
2598 EVT VT
= Node
->getValueType(0);
2600 Results
.push_back(DAG
.getConstant(0, VT
));
2602 assert(VT
.isFloatingPoint() && "Unknown value type!");
2603 Results
.push_back(DAG
.getConstantFP(0, VT
));
2608 // If this operation is not supported, lower it to 'abort()' call
2609 TargetLowering::ArgListTy Args
;
2610 std::pair
<SDValue
, SDValue
> CallResult
=
2611 TLI
.LowerCallTo(Node
->getOperand(0), Type::getVoidTy(*DAG
.getContext()),
2612 false, false, false, false, 0, CallingConv::C
, false,
2613 /*isReturnValueUsed=*/true,
2614 DAG
.getExternalSymbol("abort", TLI
.getPointerTy()),
2616 Results
.push_back(CallResult
.second
);
2620 case ISD::BIT_CONVERT
:
2621 Tmp1
= EmitStackConvert(Node
->getOperand(0), Node
->getValueType(0),
2622 Node
->getValueType(0), dl
);
2623 Results
.push_back(Tmp1
);
2625 case ISD::FP_EXTEND
:
2626 Tmp1
= EmitStackConvert(Node
->getOperand(0),
2627 Node
->getOperand(0).getValueType(),
2628 Node
->getValueType(0), dl
);
2629 Results
.push_back(Tmp1
);
2631 case ISD::SIGN_EXTEND_INREG
: {
2632 // NOTE: we could fall back on load/store here too for targets without
2633 // SAR. However, it is doubtful that any exist.
2634 EVT ExtraVT
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
2635 EVT VT
= Node
->getValueType(0);
2636 EVT ShiftAmountTy
= TLI
.getShiftAmountTy();
2639 unsigned BitsDiff
= VT
.getScalarType().getSizeInBits() -
2640 ExtraVT
.getScalarType().getSizeInBits();
2641 SDValue ShiftCst
= DAG
.getConstant(BitsDiff
, ShiftAmountTy
);
2642 Tmp1
= DAG
.getNode(ISD::SHL
, dl
, Node
->getValueType(0),
2643 Node
->getOperand(0), ShiftCst
);
2644 Tmp1
= DAG
.getNode(ISD::SRA
, dl
, Node
->getValueType(0), Tmp1
, ShiftCst
);
2645 Results
.push_back(Tmp1
);
2648 case ISD::FP_ROUND_INREG
: {
2649 // The only way we can lower this is to turn it into a TRUNCSTORE,
2650 // EXTLOAD pair, targetting a temporary location (a stack slot).
2652 // NOTE: there is a choice here between constantly creating new stack
2653 // slots and always reusing the same one. We currently always create
2654 // new ones, as reuse may inhibit scheduling.
2655 EVT ExtraVT
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
2656 Tmp1
= EmitStackConvert(Node
->getOperand(0), ExtraVT
,
2657 Node
->getValueType(0), dl
);
2658 Results
.push_back(Tmp1
);
2661 case ISD::SINT_TO_FP
:
2662 case ISD::UINT_TO_FP
:
2663 Tmp1
= ExpandLegalINT_TO_FP(Node
->getOpcode() == ISD::SINT_TO_FP
,
2664 Node
->getOperand(0), Node
->getValueType(0), dl
);
2665 Results
.push_back(Tmp1
);
2667 case ISD::FP_TO_UINT
: {
2668 SDValue True
, False
;
2669 EVT VT
= Node
->getOperand(0).getValueType();
2670 EVT NVT
= Node
->getValueType(0);
2671 const uint64_t zero
[] = {0, 0};
2672 APFloat apf
= APFloat(APInt(VT
.getSizeInBits(), 2, zero
));
2673 APInt x
= APInt::getSignBit(NVT
.getSizeInBits());
2674 (void)apf
.convertFromAPInt(x
, false, APFloat::rmNearestTiesToEven
);
2675 Tmp1
= DAG
.getConstantFP(apf
, VT
);
2676 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
),
2677 Node
->getOperand(0),
2679 True
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, NVT
, Node
->getOperand(0));
2680 False
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, NVT
,
2681 DAG
.getNode(ISD::FSUB
, dl
, VT
,
2682 Node
->getOperand(0), Tmp1
));
2683 False
= DAG
.getNode(ISD::XOR
, dl
, NVT
, False
,
2684 DAG
.getConstant(x
, NVT
));
2685 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp2
, True
, False
);
2686 Results
.push_back(Tmp1
);
2690 const Value
*V
= cast
<SrcValueSDNode
>(Node
->getOperand(2))->getValue();
2691 EVT VT
= Node
->getValueType(0);
2692 Tmp1
= Node
->getOperand(0);
2693 Tmp2
= Node
->getOperand(1);
2694 unsigned Align
= Node
->getConstantOperandVal(3);
2696 SDValue VAListLoad
= DAG
.getLoad(TLI
.getPointerTy(), dl
, Tmp1
, Tmp2
,
2697 MachinePointerInfo(V
), false, false, 0);
2698 SDValue VAList
= VAListLoad
;
2700 if (Align
> TLI
.getMinStackArgumentAlignment()) {
2701 assert(((Align
& (Align
-1)) == 0) && "Expected Align to be a power of 2");
2703 VAList
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), VAList
,
2704 DAG
.getConstant(Align
- 1,
2705 TLI
.getPointerTy()));
2707 VAList
= DAG
.getNode(ISD::AND
, dl
, TLI
.getPointerTy(), VAList
,
2708 DAG
.getConstant(-(int64_t)Align
,
2709 TLI
.getPointerTy()));
2712 // Increment the pointer, VAList, to the next vaarg
2713 Tmp3
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), VAList
,
2714 DAG
.getConstant(TLI
.getTargetData()->
2715 getTypeAllocSize(VT
.getTypeForEVT(*DAG
.getContext())),
2716 TLI
.getPointerTy()));
2717 // Store the incremented VAList to the legalized pointer
2718 Tmp3
= DAG
.getStore(VAListLoad
.getValue(1), dl
, Tmp3
, Tmp2
,
2719 MachinePointerInfo(V
), false, false, 0);
2720 // Load the actual argument out of the pointer VAList
2721 Results
.push_back(DAG
.getLoad(VT
, dl
, Tmp3
, VAList
, MachinePointerInfo(),
2723 Results
.push_back(Results
[0].getValue(1));
2727 // This defaults to loading a pointer from the input and storing it to the
2728 // output, returning the chain.
2729 const Value
*VD
= cast
<SrcValueSDNode
>(Node
->getOperand(3))->getValue();
2730 const Value
*VS
= cast
<SrcValueSDNode
>(Node
->getOperand(4))->getValue();
2731 Tmp1
= DAG
.getLoad(TLI
.getPointerTy(), dl
, Node
->getOperand(0),
2732 Node
->getOperand(2), MachinePointerInfo(VS
),
2734 Tmp1
= DAG
.getStore(Tmp1
.getValue(1), dl
, Tmp1
, Node
->getOperand(1),
2735 MachinePointerInfo(VD
), false, false, 0);
2736 Results
.push_back(Tmp1
);
2739 case ISD::EXTRACT_VECTOR_ELT
:
2740 if (Node
->getOperand(0).getValueType().getVectorNumElements() == 1)
2741 // This must be an access of the only element. Return it.
2742 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, Node
->getValueType(0),
2743 Node
->getOperand(0));
2745 Tmp1
= ExpandExtractFromVectorThroughStack(SDValue(Node
, 0));
2746 Results
.push_back(Tmp1
);
2748 case ISD::EXTRACT_SUBVECTOR
:
2749 Results
.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node
, 0)));
2751 case ISD::CONCAT_VECTORS
: {
2752 Results
.push_back(ExpandVectorBuildThroughStack(Node
));
2755 case ISD::SCALAR_TO_VECTOR
:
2756 Results
.push_back(ExpandSCALAR_TO_VECTOR(Node
));
2758 case ISD::INSERT_VECTOR_ELT
:
2759 Results
.push_back(ExpandINSERT_VECTOR_ELT(Node
->getOperand(0),
2760 Node
->getOperand(1),
2761 Node
->getOperand(2), dl
));
2763 case ISD::VECTOR_SHUFFLE
: {
2764 SmallVector
<int, 8> Mask
;
2765 cast
<ShuffleVectorSDNode
>(Node
)->getMask(Mask
);
2767 EVT VT
= Node
->getValueType(0);
2768 EVT EltVT
= VT
.getVectorElementType();
2769 if (getTypeAction(EltVT
) == Promote
)
2770 EltVT
= TLI
.getTypeToTransformTo(*DAG
.getContext(), EltVT
);
2771 unsigned NumElems
= VT
.getVectorNumElements();
2772 SmallVector
<SDValue
, 8> Ops
;
2773 for (unsigned i
= 0; i
!= NumElems
; ++i
) {
2775 Ops
.push_back(DAG
.getUNDEF(EltVT
));
2778 unsigned Idx
= Mask
[i
];
2780 Ops
.push_back(DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, EltVT
,
2781 Node
->getOperand(0),
2782 DAG
.getIntPtrConstant(Idx
)));
2784 Ops
.push_back(DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, EltVT
,
2785 Node
->getOperand(1),
2786 DAG
.getIntPtrConstant(Idx
- NumElems
)));
2788 Tmp1
= DAG
.getNode(ISD::BUILD_VECTOR
, dl
, VT
, &Ops
[0], Ops
.size());
2789 Results
.push_back(Tmp1
);
2792 case ISD::EXTRACT_ELEMENT
: {
2793 EVT OpTy
= Node
->getOperand(0).getValueType();
2794 if (cast
<ConstantSDNode
>(Node
->getOperand(1))->getZExtValue()) {
2796 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, OpTy
, Node
->getOperand(0),
2797 DAG
.getConstant(OpTy
.getSizeInBits()/2,
2798 TLI
.getShiftAmountTy()));
2799 Tmp1
= DAG
.getNode(ISD::TRUNCATE
, dl
, Node
->getValueType(0), Tmp1
);
2802 Tmp1
= DAG
.getNode(ISD::TRUNCATE
, dl
, Node
->getValueType(0),
2803 Node
->getOperand(0));
2805 Results
.push_back(Tmp1
);
2808 case ISD::STACKSAVE
:
2809 // Expand to CopyFromReg if the target set
2810 // StackPointerRegisterToSaveRestore.
2811 if (unsigned SP
= TLI
.getStackPointerRegisterToSaveRestore()) {
2812 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(0), dl
, SP
,
2813 Node
->getValueType(0)));
2814 Results
.push_back(Results
[0].getValue(1));
2816 Results
.push_back(DAG
.getUNDEF(Node
->getValueType(0)));
2817 Results
.push_back(Node
->getOperand(0));
2820 case ISD::STACKRESTORE
:
2821 // Expand to CopyToReg if the target set
2822 // StackPointerRegisterToSaveRestore.
2823 if (unsigned SP
= TLI
.getStackPointerRegisterToSaveRestore()) {
2824 Results
.push_back(DAG
.getCopyToReg(Node
->getOperand(0), dl
, SP
,
2825 Node
->getOperand(1)));
2827 Results
.push_back(Node
->getOperand(0));
2830 case ISD::FCOPYSIGN
:
2831 Results
.push_back(ExpandFCOPYSIGN(Node
));
2834 // Expand Y = FNEG(X) -> Y = SUB -0.0, X
2835 Tmp1
= DAG
.getConstantFP(-0.0, Node
->getValueType(0));
2836 Tmp1
= DAG
.getNode(ISD::FSUB
, dl
, Node
->getValueType(0), Tmp1
,
2837 Node
->getOperand(0));
2838 Results
.push_back(Tmp1
);
2841 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
2842 EVT VT
= Node
->getValueType(0);
2843 Tmp1
= Node
->getOperand(0);
2844 Tmp2
= DAG
.getConstantFP(0.0, VT
);
2845 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(Tmp1
.getValueType()),
2846 Tmp1
, Tmp2
, ISD::SETUGT
);
2847 Tmp3
= DAG
.getNode(ISD::FNEG
, dl
, VT
, Tmp1
);
2848 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, VT
, Tmp2
, Tmp1
, Tmp3
);
2849 Results
.push_back(Tmp1
);
2853 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::SQRT_F32
, RTLIB::SQRT_F64
,
2854 RTLIB::SQRT_F80
, RTLIB::SQRT_PPCF128
));
2857 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::SIN_F32
, RTLIB::SIN_F64
,
2858 RTLIB::SIN_F80
, RTLIB::SIN_PPCF128
));
2861 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::COS_F32
, RTLIB::COS_F64
,
2862 RTLIB::COS_F80
, RTLIB::COS_PPCF128
));
2865 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG_F32
, RTLIB::LOG_F64
,
2866 RTLIB::LOG_F80
, RTLIB::LOG_PPCF128
));
2869 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG2_F32
, RTLIB::LOG2_F64
,
2870 RTLIB::LOG2_F80
, RTLIB::LOG2_PPCF128
));
2873 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG10_F32
, RTLIB::LOG10_F64
,
2874 RTLIB::LOG10_F80
, RTLIB::LOG10_PPCF128
));
2877 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::EXP_F32
, RTLIB::EXP_F64
,
2878 RTLIB::EXP_F80
, RTLIB::EXP_PPCF128
));
2881 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::EXP2_F32
, RTLIB::EXP2_F64
,
2882 RTLIB::EXP2_F80
, RTLIB::EXP2_PPCF128
));
2885 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::TRUNC_F32
, RTLIB::TRUNC_F64
,
2886 RTLIB::TRUNC_F80
, RTLIB::TRUNC_PPCF128
));
2889 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::FLOOR_F32
, RTLIB::FLOOR_F64
,
2890 RTLIB::FLOOR_F80
, RTLIB::FLOOR_PPCF128
));
2893 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::CEIL_F32
, RTLIB::CEIL_F64
,
2894 RTLIB::CEIL_F80
, RTLIB::CEIL_PPCF128
));
2897 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::RINT_F32
, RTLIB::RINT_F64
,
2898 RTLIB::RINT_F80
, RTLIB::RINT_PPCF128
));
2900 case ISD::FNEARBYINT
:
2901 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::NEARBYINT_F32
,
2902 RTLIB::NEARBYINT_F64
,
2903 RTLIB::NEARBYINT_F80
,
2904 RTLIB::NEARBYINT_PPCF128
));
2907 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::POWI_F32
, RTLIB::POWI_F64
,
2908 RTLIB::POWI_F80
, RTLIB::POWI_PPCF128
));
2911 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::POW_F32
, RTLIB::POW_F64
,
2912 RTLIB::POW_F80
, RTLIB::POW_PPCF128
));
2915 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::DIV_F32
, RTLIB::DIV_F64
,
2916 RTLIB::DIV_F80
, RTLIB::DIV_PPCF128
));
2919 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::REM_F32
, RTLIB::REM_F64
,
2920 RTLIB::REM_F80
, RTLIB::REM_PPCF128
));
2922 case ISD::FP16_TO_FP32
:
2923 Results
.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32
, Node
, false));
2925 case ISD::FP32_TO_FP16
:
2926 Results
.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16
, Node
, false));
2928 case ISD::ConstantFP
: {
2929 ConstantFPSDNode
*CFP
= cast
<ConstantFPSDNode
>(Node
);
2930 // Check to see if this FP immediate is already legal.
2931 // If this is a legal constant, turn it into a TargetConstantFP node.
2932 if (TLI
.isFPImmLegal(CFP
->getValueAPF(), Node
->getValueType(0)))
2933 Results
.push_back(SDValue(Node
, 0));
2935 Results
.push_back(ExpandConstantFP(CFP
, true, DAG
, TLI
));
2938 case ISD::EHSELECTION
: {
2939 unsigned Reg
= TLI
.getExceptionSelectorRegister();
2940 assert(Reg
&& "Can't expand to unknown register!");
2941 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(1), dl
, Reg
,
2942 Node
->getValueType(0)));
2943 Results
.push_back(Results
[0].getValue(1));
2946 case ISD::EXCEPTIONADDR
: {
2947 unsigned Reg
= TLI
.getExceptionAddressRegister();
2948 assert(Reg
&& "Can't expand to unknown register!");
2949 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(0), dl
, Reg
,
2950 Node
->getValueType(0)));
2951 Results
.push_back(Results
[0].getValue(1));
2955 EVT VT
= Node
->getValueType(0);
2956 assert(TLI
.isOperationLegalOrCustom(ISD::ADD
, VT
) &&
2957 TLI
.isOperationLegalOrCustom(ISD::XOR
, VT
) &&
2958 "Don't know how to expand this subtraction!");
2959 Tmp1
= DAG
.getNode(ISD::XOR
, dl
, VT
, Node
->getOperand(1),
2960 DAG
.getConstant(APInt::getAllOnesValue(VT
.getSizeInBits()), VT
));
2961 Tmp1
= DAG
.getNode(ISD::ADD
, dl
, VT
, Tmp2
, DAG
.getConstant(1, VT
));
2962 Results
.push_back(DAG
.getNode(ISD::ADD
, dl
, VT
, Node
->getOperand(0), Tmp1
));
2967 EVT VT
= Node
->getValueType(0);
2968 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
2969 bool isSigned
= Node
->getOpcode() == ISD::SREM
;
2970 unsigned DivOpc
= isSigned
? ISD::SDIV
: ISD::UDIV
;
2971 unsigned DivRemOpc
= isSigned
? ISD::SDIVREM
: ISD::UDIVREM
;
2972 Tmp2
= Node
->getOperand(0);
2973 Tmp3
= Node
->getOperand(1);
2974 if (TLI
.isOperationLegalOrCustom(DivRemOpc
, VT
)) {
2975 Tmp1
= DAG
.getNode(DivRemOpc
, dl
, VTs
, Tmp2
, Tmp3
).getValue(1);
2976 } else if (TLI
.isOperationLegalOrCustom(DivOpc
, VT
)) {
2978 Tmp1
= DAG
.getNode(DivOpc
, dl
, VT
, Tmp2
, Tmp3
);
2979 Tmp1
= DAG
.getNode(ISD::MUL
, dl
, VT
, Tmp1
, Tmp3
);
2980 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, VT
, Tmp2
, Tmp1
);
2981 } else if (isSigned
) {
2982 Tmp1
= ExpandIntLibCall(Node
, true,
2984 RTLIB::SREM_I16
, RTLIB::SREM_I32
,
2985 RTLIB::SREM_I64
, RTLIB::SREM_I128
);
2987 Tmp1
= ExpandIntLibCall(Node
, false,
2989 RTLIB::UREM_I16
, RTLIB::UREM_I32
,
2990 RTLIB::UREM_I64
, RTLIB::UREM_I128
);
2992 Results
.push_back(Tmp1
);
2997 bool isSigned
= Node
->getOpcode() == ISD::SDIV
;
2998 unsigned DivRemOpc
= isSigned
? ISD::SDIVREM
: ISD::UDIVREM
;
2999 EVT VT
= Node
->getValueType(0);
3000 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
3001 if (TLI
.isOperationLegalOrCustom(DivRemOpc
, VT
))
3002 Tmp1
= DAG
.getNode(DivRemOpc
, dl
, VTs
, Node
->getOperand(0),
3003 Node
->getOperand(1));
3005 Tmp1
= ExpandIntLibCall(Node
, true,
3007 RTLIB::SDIV_I16
, RTLIB::SDIV_I32
,
3008 RTLIB::SDIV_I64
, RTLIB::SDIV_I128
);
3010 Tmp1
= ExpandIntLibCall(Node
, false,
3012 RTLIB::UDIV_I16
, RTLIB::UDIV_I32
,
3013 RTLIB::UDIV_I64
, RTLIB::UDIV_I128
);
3014 Results
.push_back(Tmp1
);
3019 unsigned ExpandOpcode
= Node
->getOpcode() == ISD::MULHU
? ISD::UMUL_LOHI
:
3021 EVT VT
= Node
->getValueType(0);
3022 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
3023 assert(TLI
.isOperationLegalOrCustom(ExpandOpcode
, VT
) &&
3024 "If this wasn't legal, it shouldn't have been created!");
3025 Tmp1
= DAG
.getNode(ExpandOpcode
, dl
, VTs
, Node
->getOperand(0),
3026 Node
->getOperand(1));
3027 Results
.push_back(Tmp1
.getValue(1));
3031 EVT VT
= Node
->getValueType(0);
3032 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
3033 // See if multiply or divide can be lowered using two-result operations.
3034 // We just need the low half of the multiply; try both the signed
3035 // and unsigned forms. If the target supports both SMUL_LOHI and
3036 // UMUL_LOHI, form a preference by checking which forms of plain
3037 // MULH it supports.
3038 bool HasSMUL_LOHI
= TLI
.isOperationLegalOrCustom(ISD::SMUL_LOHI
, VT
);
3039 bool HasUMUL_LOHI
= TLI
.isOperationLegalOrCustom(ISD::UMUL_LOHI
, VT
);
3040 bool HasMULHS
= TLI
.isOperationLegalOrCustom(ISD::MULHS
, VT
);
3041 bool HasMULHU
= TLI
.isOperationLegalOrCustom(ISD::MULHU
, VT
);
3042 unsigned OpToUse
= 0;
3043 if (HasSMUL_LOHI
&& !HasMULHS
) {
3044 OpToUse
= ISD::SMUL_LOHI
;
3045 } else if (HasUMUL_LOHI
&& !HasMULHU
) {
3046 OpToUse
= ISD::UMUL_LOHI
;
3047 } else if (HasSMUL_LOHI
) {
3048 OpToUse
= ISD::SMUL_LOHI
;
3049 } else if (HasUMUL_LOHI
) {
3050 OpToUse
= ISD::UMUL_LOHI
;
3053 Results
.push_back(DAG
.getNode(OpToUse
, dl
, VTs
, Node
->getOperand(0),
3054 Node
->getOperand(1)));
3057 Tmp1
= ExpandIntLibCall(Node
, false,
3059 RTLIB::MUL_I16
, RTLIB::MUL_I32
,
3060 RTLIB::MUL_I64
, RTLIB::MUL_I128
);
3061 Results
.push_back(Tmp1
);
3066 SDValue LHS
= Node
->getOperand(0);
3067 SDValue RHS
= Node
->getOperand(1);
3068 SDValue Sum
= DAG
.getNode(Node
->getOpcode() == ISD::SADDO
?
3069 ISD::ADD
: ISD::SUB
, dl
, LHS
.getValueType(),
3071 Results
.push_back(Sum
);
3072 EVT OType
= Node
->getValueType(1);
3074 SDValue Zero
= DAG
.getConstant(0, LHS
.getValueType());
3076 // LHSSign -> LHS >= 0
3077 // RHSSign -> RHS >= 0
3078 // SumSign -> Sum >= 0
3081 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
3083 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
3085 SDValue LHSSign
= DAG
.getSetCC(dl
, OType
, LHS
, Zero
, ISD::SETGE
);
3086 SDValue RHSSign
= DAG
.getSetCC(dl
, OType
, RHS
, Zero
, ISD::SETGE
);
3087 SDValue SignsMatch
= DAG
.getSetCC(dl
, OType
, LHSSign
, RHSSign
,
3088 Node
->getOpcode() == ISD::SADDO
?
3089 ISD::SETEQ
: ISD::SETNE
);
3091 SDValue SumSign
= DAG
.getSetCC(dl
, OType
, Sum
, Zero
, ISD::SETGE
);
3092 SDValue SumSignNE
= DAG
.getSetCC(dl
, OType
, LHSSign
, SumSign
, ISD::SETNE
);
3094 SDValue Cmp
= DAG
.getNode(ISD::AND
, dl
, OType
, SignsMatch
, SumSignNE
);
3095 Results
.push_back(Cmp
);
3100 SDValue LHS
= Node
->getOperand(0);
3101 SDValue RHS
= Node
->getOperand(1);
3102 SDValue Sum
= DAG
.getNode(Node
->getOpcode() == ISD::UADDO
?
3103 ISD::ADD
: ISD::SUB
, dl
, LHS
.getValueType(),
3105 Results
.push_back(Sum
);
3106 Results
.push_back(DAG
.getSetCC(dl
, Node
->getValueType(1), Sum
, LHS
,
3107 Node
->getOpcode () == ISD::UADDO
?
3108 ISD::SETULT
: ISD::SETUGT
));
3113 EVT VT
= Node
->getValueType(0);
3114 SDValue LHS
= Node
->getOperand(0);
3115 SDValue RHS
= Node
->getOperand(1);
3118 static const unsigned Ops
[2][3] =
3119 { { ISD::MULHU
, ISD::UMUL_LOHI
, ISD::ZERO_EXTEND
},
3120 { ISD::MULHS
, ISD::SMUL_LOHI
, ISD::SIGN_EXTEND
}};
3121 bool isSigned
= Node
->getOpcode() == ISD::SMULO
;
3122 if (TLI
.isOperationLegalOrCustom(Ops
[isSigned
][0], VT
)) {
3123 BottomHalf
= DAG
.getNode(ISD::MUL
, dl
, VT
, LHS
, RHS
);
3124 TopHalf
= DAG
.getNode(Ops
[isSigned
][0], dl
, VT
, LHS
, RHS
);
3125 } else if (TLI
.isOperationLegalOrCustom(Ops
[isSigned
][1], VT
)) {
3126 BottomHalf
= DAG
.getNode(Ops
[isSigned
][1], dl
, DAG
.getVTList(VT
, VT
), LHS
,
3128 TopHalf
= BottomHalf
.getValue(1);
3130 // FIXME: We should be able to fall back to a libcall with an illegal
3131 // type in some cases.
3132 // Also, we can fall back to a division in some cases, but that's a big
3133 // performance hit in the general case.
3134 assert(TLI
.isTypeLegal(EVT::getIntegerVT(*DAG
.getContext(),
3135 VT
.getSizeInBits() * 2)) &&
3136 "Don't know how to expand this operation yet!");
3137 EVT WideVT
= EVT::getIntegerVT(*DAG
.getContext(), VT
.getSizeInBits() * 2);
3138 LHS
= DAG
.getNode(Ops
[isSigned
][2], dl
, WideVT
, LHS
);
3139 RHS
= DAG
.getNode(Ops
[isSigned
][2], dl
, WideVT
, RHS
);
3140 Tmp1
= DAG
.getNode(ISD::MUL
, dl
, WideVT
, LHS
, RHS
);
3141 BottomHalf
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, VT
, Tmp1
,
3142 DAG
.getIntPtrConstant(0));
3143 TopHalf
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, VT
, Tmp1
,
3144 DAG
.getIntPtrConstant(1));
3147 Tmp1
= DAG
.getConstant(VT
.getSizeInBits() - 1, TLI
.getShiftAmountTy());
3148 Tmp1
= DAG
.getNode(ISD::SRA
, dl
, VT
, BottomHalf
, Tmp1
);
3149 TopHalf
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
), TopHalf
, Tmp1
,
3152 TopHalf
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
), TopHalf
,
3153 DAG
.getConstant(0, VT
), ISD::SETNE
);
3155 Results
.push_back(BottomHalf
);
3156 Results
.push_back(TopHalf
);
3159 case ISD::BUILD_PAIR
: {
3160 EVT PairTy
= Node
->getValueType(0);
3161 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, PairTy
, Node
->getOperand(0));
3162 Tmp2
= DAG
.getNode(ISD::ANY_EXTEND
, dl
, PairTy
, Node
->getOperand(1));
3163 Tmp2
= DAG
.getNode(ISD::SHL
, dl
, PairTy
, Tmp2
,
3164 DAG
.getConstant(PairTy
.getSizeInBits()/2,
3165 TLI
.getShiftAmountTy()));
3166 Results
.push_back(DAG
.getNode(ISD::OR
, dl
, PairTy
, Tmp1
, Tmp2
));
3170 Tmp1
= Node
->getOperand(0);
3171 Tmp2
= Node
->getOperand(1);
3172 Tmp3
= Node
->getOperand(2);
3173 if (Tmp1
.getOpcode() == ISD::SETCC
) {
3174 Tmp1
= DAG
.getSelectCC(dl
, Tmp1
.getOperand(0), Tmp1
.getOperand(1),
3176 cast
<CondCodeSDNode
>(Tmp1
.getOperand(2))->get());
3178 Tmp1
= DAG
.getSelectCC(dl
, Tmp1
,
3179 DAG
.getConstant(0, Tmp1
.getValueType()),
3180 Tmp2
, Tmp3
, ISD::SETNE
);
3182 Results
.push_back(Tmp1
);
3185 SDValue Chain
= Node
->getOperand(0);
3186 SDValue Table
= Node
->getOperand(1);
3187 SDValue Index
= Node
->getOperand(2);
3189 EVT PTy
= TLI
.getPointerTy();
3191 const TargetData
&TD
= *TLI
.getTargetData();
3192 unsigned EntrySize
=
3193 DAG
.getMachineFunction().getJumpTableInfo()->getEntrySize(TD
);
3195 Index
= DAG
.getNode(ISD::MUL
, dl
, PTy
,
3196 Index
, DAG
.getConstant(EntrySize
, PTy
));
3197 SDValue Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Index
, Table
);
3199 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(), EntrySize
* 8);
3200 SDValue LD
= DAG
.getExtLoad(ISD::SEXTLOAD
, PTy
, dl
, Chain
, Addr
,
3201 MachinePointerInfo::getJumpTable(), MemVT
,
3204 if (TM
.getRelocationModel() == Reloc::PIC_
) {
3205 // For PIC, the sequence is:
3206 // BRIND(load(Jumptable + index) + RelocBase)
3207 // RelocBase can be JumpTable, GOT or some sort of global base.
3208 Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Addr
,
3209 TLI
.getPICJumpTableRelocBase(Table
, DAG
));
3211 Tmp1
= DAG
.getNode(ISD::BRIND
, dl
, MVT::Other
, LD
.getValue(1), Addr
);
3212 Results
.push_back(Tmp1
);
3216 // Expand brcond's setcc into its constituent parts and create a BR_CC
3218 Tmp1
= Node
->getOperand(0);
3219 Tmp2
= Node
->getOperand(1);
3220 if (Tmp2
.getOpcode() == ISD::SETCC
) {
3221 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, MVT::Other
,
3222 Tmp1
, Tmp2
.getOperand(2),
3223 Tmp2
.getOperand(0), Tmp2
.getOperand(1),
3224 Node
->getOperand(2));
3226 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, MVT::Other
, Tmp1
,
3227 DAG
.getCondCode(ISD::SETNE
), Tmp2
,
3228 DAG
.getConstant(0, Tmp2
.getValueType()),
3229 Node
->getOperand(2));
3231 Results
.push_back(Tmp1
);
3234 Tmp1
= Node
->getOperand(0);
3235 Tmp2
= Node
->getOperand(1);
3236 Tmp3
= Node
->getOperand(2);
3237 LegalizeSetCCCondCode(Node
->getValueType(0), Tmp1
, Tmp2
, Tmp3
, dl
);
3239 // If we expanded the SETCC into an AND/OR, return the new node
3240 if (Tmp2
.getNode() == 0) {
3241 Results
.push_back(Tmp1
);
3245 // Otherwise, SETCC for the given comparison type must be completely
3246 // illegal; expand it into a SELECT_CC.
3247 EVT VT
= Node
->getValueType(0);
3248 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, VT
, Tmp1
, Tmp2
,
3249 DAG
.getConstant(1, VT
), DAG
.getConstant(0, VT
), Tmp3
);
3250 Results
.push_back(Tmp1
);
3253 case ISD::SELECT_CC
: {
3254 Tmp1
= Node
->getOperand(0); // LHS
3255 Tmp2
= Node
->getOperand(1); // RHS
3256 Tmp3
= Node
->getOperand(2); // True
3257 Tmp4
= Node
->getOperand(3); // False
3258 SDValue CC
= Node
->getOperand(4);
3260 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp1
.getValueType()),
3261 Tmp1
, Tmp2
, CC
, dl
);
3263 assert(!Tmp2
.getNode() && "Can't legalize SELECT_CC with legal condition!");
3264 Tmp2
= DAG
.getConstant(0, Tmp1
.getValueType());
3265 CC
= DAG
.getCondCode(ISD::SETNE
);
3266 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp2
,
3268 Results
.push_back(Tmp1
);
3272 Tmp1
= Node
->getOperand(0); // Chain
3273 Tmp2
= Node
->getOperand(2); // LHS
3274 Tmp3
= Node
->getOperand(3); // RHS
3275 Tmp4
= Node
->getOperand(1); // CC
3277 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp2
.getValueType()),
3278 Tmp2
, Tmp3
, Tmp4
, dl
);
3279 LastCALLSEQ_END
= DAG
.getEntryNode();
3281 assert(!Tmp3
.getNode() && "Can't legalize BR_CC with legal condition!");
3282 Tmp3
= DAG
.getConstant(0, Tmp2
.getValueType());
3283 Tmp4
= DAG
.getCondCode(ISD::SETNE
);
3284 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp4
, Tmp2
,
3285 Tmp3
, Node
->getOperand(4));
3286 Results
.push_back(Tmp1
);
3289 case ISD::GLOBAL_OFFSET_TABLE
:
3290 case ISD::GlobalAddress
:
3291 case ISD::GlobalTLSAddress
:
3292 case ISD::ExternalSymbol
:
3293 case ISD::ConstantPool
:
3294 case ISD::JumpTable
:
3295 case ISD::INTRINSIC_W_CHAIN
:
3296 case ISD::INTRINSIC_WO_CHAIN
:
3297 case ISD::INTRINSIC_VOID
:
3298 // FIXME: Custom lowering for these operations shouldn't return null!
3299 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
3300 Results
.push_back(SDValue(Node
, i
));
3304 void SelectionDAGLegalize::PromoteNode(SDNode
*Node
,
3305 SmallVectorImpl
<SDValue
> &Results
) {
3306 EVT OVT
= Node
->getValueType(0);
3307 if (Node
->getOpcode() == ISD::UINT_TO_FP
||
3308 Node
->getOpcode() == ISD::SINT_TO_FP
||
3309 Node
->getOpcode() == ISD::SETCC
) {
3310 OVT
= Node
->getOperand(0).getValueType();
3312 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), OVT
);
3313 DebugLoc dl
= Node
->getDebugLoc();
3314 SDValue Tmp1
, Tmp2
, Tmp3
;
3315 switch (Node
->getOpcode()) {
3319 // Zero extend the argument.
3320 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, NVT
, Node
->getOperand(0));
3321 // Perform the larger operation.
3322 Tmp1
= DAG
.getNode(Node
->getOpcode(), dl
, NVT
, Tmp1
);
3323 if (Node
->getOpcode() == ISD::CTTZ
) {
3324 //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
3325 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(NVT
),
3326 Tmp1
, DAG
.getConstant(NVT
.getSizeInBits(), NVT
),
3328 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp2
,
3329 DAG
.getConstant(OVT
.getSizeInBits(), NVT
), Tmp1
);
3330 } else if (Node
->getOpcode() == ISD::CTLZ
) {
3331 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
3332 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, NVT
, Tmp1
,
3333 DAG
.getConstant(NVT
.getSizeInBits() -
3334 OVT
.getSizeInBits(), NVT
));
3336 Results
.push_back(DAG
.getNode(ISD::TRUNCATE
, dl
, OVT
, Tmp1
));
3339 unsigned DiffBits
= NVT
.getSizeInBits() - OVT
.getSizeInBits();
3340 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, NVT
, Node
->getOperand(0));
3341 Tmp1
= DAG
.getNode(ISD::BSWAP
, dl
, NVT
, Tmp1
);
3342 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, NVT
, Tmp1
,
3343 DAG
.getConstant(DiffBits
, TLI
.getShiftAmountTy()));
3344 Results
.push_back(Tmp1
);
3347 case ISD::FP_TO_UINT
:
3348 case ISD::FP_TO_SINT
:
3349 Tmp1
= PromoteLegalFP_TO_INT(Node
->getOperand(0), Node
->getValueType(0),
3350 Node
->getOpcode() == ISD::FP_TO_SINT
, dl
);
3351 Results
.push_back(Tmp1
);
3353 case ISD::UINT_TO_FP
:
3354 case ISD::SINT_TO_FP
:
3355 Tmp1
= PromoteLegalINT_TO_FP(Node
->getOperand(0), Node
->getValueType(0),
3356 Node
->getOpcode() == ISD::SINT_TO_FP
, dl
);
3357 Results
.push_back(Tmp1
);
3362 unsigned ExtOp
, TruncOp
;
3363 if (OVT
.isVector()) {
3364 ExtOp
= ISD::BIT_CONVERT
;
3365 TruncOp
= ISD::BIT_CONVERT
;
3367 assert(OVT
.isInteger() && "Cannot promote logic operation");
3368 ExtOp
= ISD::ANY_EXTEND
;
3369 TruncOp
= ISD::TRUNCATE
;
3371 // Promote each of the values to the new type.
3372 Tmp1
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(0));
3373 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3374 // Perform the larger operation, then convert back
3375 Tmp1
= DAG
.getNode(Node
->getOpcode(), dl
, NVT
, Tmp1
, Tmp2
);
3376 Results
.push_back(DAG
.getNode(TruncOp
, dl
, OVT
, Tmp1
));
3380 unsigned ExtOp
, TruncOp
;
3381 if (Node
->getValueType(0).isVector()) {
3382 ExtOp
= ISD::BIT_CONVERT
;
3383 TruncOp
= ISD::BIT_CONVERT
;
3384 } else if (Node
->getValueType(0).isInteger()) {
3385 ExtOp
= ISD::ANY_EXTEND
;
3386 TruncOp
= ISD::TRUNCATE
;
3388 ExtOp
= ISD::FP_EXTEND
;
3389 TruncOp
= ISD::FP_ROUND
;
3391 Tmp1
= Node
->getOperand(0);
3392 // Promote each of the values to the new type.
3393 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3394 Tmp3
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(2));
3395 // Perform the larger operation, then round down.
3396 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp1
, Tmp2
, Tmp3
);
3397 if (TruncOp
!= ISD::FP_ROUND
)
3398 Tmp1
= DAG
.getNode(TruncOp
, dl
, Node
->getValueType(0), Tmp1
);
3400 Tmp1
= DAG
.getNode(TruncOp
, dl
, Node
->getValueType(0), Tmp1
,
3401 DAG
.getIntPtrConstant(0));
3402 Results
.push_back(Tmp1
);
3405 case ISD::VECTOR_SHUFFLE
: {
3406 SmallVector
<int, 8> Mask
;
3407 cast
<ShuffleVectorSDNode
>(Node
)->getMask(Mask
);
3409 // Cast the two input vectors.
3410 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, NVT
, Node
->getOperand(0));
3411 Tmp2
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, NVT
, Node
->getOperand(1));
3413 // Convert the shuffle mask to the right # elements.
3414 Tmp1
= ShuffleWithNarrowerEltType(NVT
, OVT
, dl
, Tmp1
, Tmp2
, Mask
);
3415 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, OVT
, Tmp1
);
3416 Results
.push_back(Tmp1
);
3420 unsigned ExtOp
= ISD::FP_EXTEND
;
3421 if (NVT
.isInteger()) {
3422 ISD::CondCode CCCode
=
3423 cast
<CondCodeSDNode
>(Node
->getOperand(2))->get();
3424 ExtOp
= isSignedIntSetCC(CCCode
) ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
;
3426 Tmp1
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(0));
3427 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3428 Results
.push_back(DAG
.getNode(ISD::SETCC
, dl
, Node
->getValueType(0),
3429 Tmp1
, Tmp2
, Node
->getOperand(2)));
3435 // SelectionDAG::Legalize - This is the entry point for the file.
3437 void SelectionDAG::Legalize(CodeGenOpt::Level OptLevel
) {
3438 /// run - This is the main entry point to this class.
3440 SelectionDAGLegalize(*this, OptLevel
).LegalizeDAG();