//===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAG::Legalize method.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
/// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
/// hacks on it until the target machine can handle it.  This involves
/// eliminating value sizes the machine cannot handle (promoting small sizes to
/// large sizes or splitting up large values into small values) as well as
/// eliminating operations the machine cannot handle.
///
/// This code also does a small amount of optimization and recognition of
/// idioms as part of its processing.  For example, if a target does not
/// support a 'setcc' instruction efficiently, but does support a 'brcc'
/// instruction, this will attempt to merge the setcc and branch instructions
/// into brcc's.
///
namespace {
class SelectionDAGLegalize {
  const TargetMachine &TM;
  const TargetLowering &TLI;
  SelectionDAG &DAG;

  // Libcall insertion helpers.

  /// LastCALLSEQ - This keeps track of the CALLSEQ_END node that has been
  /// legalized.  We use this to ensure that calls are properly serialized
  /// against each other, including inserted libcalls.
  SmallVector<SDValue, 8> LastCALLSEQ;

  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand      // Try to expand this to other ops, otherwise use a libcall.
  };

  /// ValueTypeActions - This is a bitvector that contains two bits for each
  /// value type, where the two bits correspond to the LegalizeAction enum.
  /// This can be queried with "getTypeAction(VT)".
  TargetLowering::ValueTypeActionImpl ValueTypeActions;

  /// LegalizedNodes - For nodes that are of legal width, and that have more
  /// than one use, this map indicates what regularized operand to use.  This
  /// allows us to avoid legalizing the same thing more than once.
  DenseMap<SDValue, SDValue> LegalizedNodes;

  void AddLegalizedOperand(SDValue From, SDValue To) {
    LegalizedNodes.insert(std::make_pair(From, To));
    // If someone requests legalization of the new node, return itself.
    if (From != To)
      LegalizedNodes.insert(std::make_pair(To, To));

    // Transfer SDDbgValues.
    DAG.TransferDbgValues(From, To);
  }

public:
  explicit SelectionDAGLegalize(SelectionDAG &DAG);

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal or we need to expand it into multiple registers of
  /// smaller integer type, or we need to promote it to a larger type.
  LegalizeAction getTypeAction(EVT VT) const {
    return (LegalizeAction)TLI.getTypeAction(*DAG.getContext(), VT);
  }

  /// isTypeLegal - Return true if this type is legal on this target.
  bool isTypeLegal(EVT VT) const {
    return getTypeAction(VT) == Legal;
  }

  void LegalizeDAG();

private:
  /// LegalizeOp - We know that the specified value has a legal type.
  /// Recursively ensure that the operands have legal types, then return the
  /// result.
  SDValue LegalizeOp(SDValue O);

  SDValue OptimizeFloatStore(StoreSDNode *ST);

  /// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
  /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
  /// is necessary to spill the vector being inserted into to memory, perform
  /// the insert there, and then read the result back.
  SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val,
                                         SDValue Idx, DebugLoc dl);
  SDValue ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                  SDValue Idx, DebugLoc dl);

  /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
  /// performs the same shuffle in terms of order or result bytes, but on a
  /// type whose vector element type is narrower than the original shuffle
  /// type. e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
  SDValue ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                     SDValue N1, SDValue N2,
                                     SmallVectorImpl<int> &Mask) const;

  bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                    SmallPtrSet<SDNode*, 32> &NodesLeadingTo);

  void LegalizeSetCCCondCode(EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC,
                             DebugLoc dl);

  SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned);
  SDValue ExpandLibCall(RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops,
                        unsigned NumOps, bool isSigned, DebugLoc dl);

  std::pair<SDValue, SDValue> ExpandChainLibCall(RTLIB::Libcall LC,
                                                 SDNode *Node, bool isSigned);
  SDValue ExpandFPLibCall(SDNode *Node, RTLIB::Libcall Call_F32,
                          RTLIB::Libcall Call_F64, RTLIB::Libcall Call_F80,
                          RTLIB::Libcall Call_PPCF128);
  SDValue ExpandIntLibCall(SDNode *Node, bool isSigned,
                           RTLIB::Libcall Call_I8,
                           RTLIB::Libcall Call_I16,
                           RTLIB::Libcall Call_I32,
                           RTLIB::Libcall Call_I64,
                           RTLIB::Libcall Call_I128);
  void ExpandDivRemLibCall(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue EmitStackConvert(SDValue SrcOp, EVT SlotVT, EVT DestVT, DebugLoc dl);
  SDValue ExpandBUILD_VECTOR(SDNode *Node);
  SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node);
  void ExpandDYNAMIC_STACKALLOC(SDNode *Node,
                                SmallVectorImpl<SDValue> &Results);
  SDValue ExpandFCOPYSIGN(SDNode *Node);
  SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, EVT DestVT,
                               DebugLoc dl);
  SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);
  SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, EVT DestVT, bool isSigned,
                                DebugLoc dl);

  SDValue ExpandBSWAP(SDValue Op, DebugLoc dl);
  SDValue ExpandBitCount(unsigned Opc, SDValue Op, DebugLoc dl);

  SDValue ExpandExtractFromVectorThroughStack(SDValue Op);
  SDValue ExpandInsertToVectorThroughStack(SDValue Op);
  SDValue ExpandVectorBuildThroughStack(SDNode* Node);

  std::pair<SDValue, SDValue> ExpandAtomic(SDNode *Node);

  void ExpandNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);
  void PromoteNode(SDNode *Node, SmallVectorImpl<SDValue> &Results);

  SDValue getLastCALLSEQ() { return LastCALLSEQ.back(); }
  void setLastCALLSEQ(const SDValue s) { LastCALLSEQ.back() = s; }
  void pushLastCALLSEQ(SDValue s) {
    LastCALLSEQ.push_back(s);
  }
  void popLastCALLSEQ() {
    LastCALLSEQ.pop_back();
  }
};
}
/// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
/// performs the same shuffle in terms of order or result bytes, but on a type
/// whose vector element type is narrower than the original shuffle type.
/// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
SDValue
SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl,
                                                 SDValue N1, SDValue N2,
                                             SmallVectorImpl<int> &Mask) const {
  unsigned NumMaskElts = VT.getVectorNumElements();
  unsigned NumDestElts = NVT.getVectorNumElements();
  unsigned NumEltsGrowth = NumDestElts / NumMaskElts;

  assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!");

  if (NumEltsGrowth == 1)
    return DAG.getVectorShuffle(NVT, dl, N1, N2, &Mask[0]);

  SmallVector<int, 8> NewMask;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int Idx = Mask[i];
    for (unsigned j = 0; j != NumEltsGrowth; ++j) {
      if (Idx < 0)
        NewMask.push_back(-1);
      else
        NewMask.push_back(Idx * NumEltsGrowth + j);
    }
  }
  assert(NewMask.size() == NumDestElts && "Non-integer NumEltsGrowth?");
  assert(TLI.isShuffleMaskLegal(NewMask, NVT) && "Shuffle not legal?");
  return DAG.getVectorShuffle(NVT, dl, N1, N2, &NewMask[0]);
}
SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag)
  : TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()),
    DAG(dag),
    ValueTypeActions(TLI.getValueTypeActions()) {
  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
         "Too many value types for ValueTypeActions to hold!");
}
void SelectionDAGLegalize::LegalizeDAG() {
  pushLastCALLSEQ(DAG.getEntryNode());

  // The legalize process is inherently a bottom-up recursive process (users
  // legalize their uses before themselves).  Given infinite stack space, we
  // could just start legalizing on the root and traverse the whole graph.  In
  // practice however, this causes us to run out of stack space on large basic
  // blocks.  To avoid this problem, compute an ordering of the nodes where
  // each node is only legalized after all of its operands are legalized.
  DAG.AssignTopologicalOrder();
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = prior(DAG.allnodes_end()); I != llvm::next(E); ++I)
    LegalizeOp(SDValue(I, 0));

  // Finally, it's possible the root changed.  Get the new root.
  SDValue OldRoot = DAG.getRoot();
  assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?");
  DAG.setRoot(LegalizedNodes[OldRoot]);

  LegalizedNodes.clear();

  // Remove dead nodes now.
  DAG.RemoveDeadNodes();
}
/// FindCallEndFromCallStart - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_END node that terminates the call sequence.
static SDNode *FindCallEndFromCallStart(SDNode *Node, int depth = 0) {
  int next_depth = depth;
  if (Node->getOpcode() == ISD::CALLSEQ_START)
    next_depth = depth + 1;
  if (Node->getOpcode() == ISD::CALLSEQ_END) {
    assert(depth > 0 && "negative depth!");
    if (depth == 1)
      return Node;
    else
      next_depth = depth - 1;
  }
  if (Node->use_empty())
    return 0;   // No CallSeqEnd

  // The chain is usually at the end.
  SDValue TheChain(Node, Node->getNumValues()-1);
  if (TheChain.getValueType() != MVT::Other) {
    // Sometimes it's at the beginning.
    TheChain = SDValue(Node, 0);
    if (TheChain.getValueType() != MVT::Other) {
      // Otherwise, hunt for it.
      for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
        if (Node->getValueType(i) == MVT::Other) {
          TheChain = SDValue(Node, i);
          break;
        }

      // Otherwise, we walked into a node without a chain.
      if (TheChain.getValueType() != MVT::Other)
        return 0;
    }
  }

  for (SDNode::use_iterator UI = Node->use_begin(),
       E = Node->use_end(); UI != E; ++UI) {
    SDNode *User = *UI;

    // Make sure to only follow users of our token chain.
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i)
      if (User->getOperand(i) == TheChain)
        if (SDNode *Result = FindCallEndFromCallStart(User, next_depth))
          return Result;
  }
  return 0;
}
/// FindCallStartFromCallEnd - Given a chained node that is part of a call
/// sequence, find the CALLSEQ_START node that initiates the call sequence.
static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
  int nested = 0;
  assert(Node && "Didn't find callseq_start for a call??");
  while (Node->getOpcode() != ISD::CALLSEQ_START || nested) {
    Node = Node->getOperand(0).getNode();
    assert(Node->getOperand(0).getValueType() == MVT::Other &&
           "Node doesn't have a token chain argument!");
    switch (Node->getOpcode()) {
    default:
      break;
    case ISD::CALLSEQ_START:
      if (!nested)
        return Node;
      Node = Node->getOperand(0).getNode();
      nested--;
      break;
    case ISD::CALLSEQ_END:
      nested++;
      break;
    }
  }
  return (Node->getOpcode() == ISD::CALLSEQ_START) ? Node : 0;
}
/// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
/// see if any uses can reach Dest.  If no uses can reach Dest, legalize them,
/// legalize ourself, and return false; otherwise, return true.
///
/// Keep track of the nodes we find that actually do lead to Dest in
/// NodesLeadingTo.  This avoids retraversing them an exponential number of
/// times.
///
bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest,
                                     SmallPtrSet<SDNode*, 32> &NodesLeadingTo) {
  if (N == Dest) return true;  // N certainly leads to Dest :)

  // If we've already processed this node and it does lead to Dest, there is no
  // need to reprocess it.
  if (NodesLeadingTo.count(N)) return true;

  // If the first result of this node has been already legalized, then it
  // cannot reach N.
  if (LegalizedNodes.count(SDValue(N, 0))) return false;

  // Okay, this node has not already been legalized.  Check and legalize all
  // operands.  If none lead to Dest, then we can legalize this node.
  bool OperandsLeadToDest = false;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OperandsLeadToDest |=     // If an operand leads to Dest, so do we.
      LegalizeAllNodesNotLeadingTo(N->getOperand(i).getNode(), Dest,
                                   NodesLeadingTo);

  if (OperandsLeadToDest) {
    NodesLeadingTo.insert(N);
    return true;
  }

  // Okay, this node looks safe, legalize it and return false.
  LegalizeOp(SDValue(N, 0));
  return false;
}
/// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
/// a load from the constant pool.
static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
                                SelectionDAG &DAG, const TargetLowering &TLI) {
  bool Extend = false;
  DebugLoc dl = CFP->getDebugLoc();

  // If a FP immediate is precise when represented as a float and if the
  // target can do an extending load from float to double, we put it into
  // the constant pool as a float, even if it is statically typed as a
  // double.  This shrinks FP constants and canonicalizes them for targets
  // where an FP extending load is the same cost as a normal load (such as on
  // the x87 fp stack or PPC FP unit).
  EVT VT = CFP->getValueType(0);
  ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
  if (!UseCP) {
    assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
    return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
                           (VT == MVT::f64) ? MVT::i64 : MVT::i32);
  }

  EVT OrigVT = VT;
  EVT SVT = VT;
  while (SVT != MVT::f32) {
    SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
    if (ConstantFPSDNode::isValueValidForType(SVT, CFP->getValueAPF()) &&
        // Only do this if the target has a native EXTLOAD instruction from
        // the smaller type.
        TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
        TLI.ShouldShrinkFPConstant(OrigVT)) {
      const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
      LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
      VT = SVT;
      Extend = true;
    }
  }

  SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  if (Extend)
    return DAG.getExtLoad(ISD::EXTLOAD, dl, OrigVT,
                          DAG.getEntryNode(),
                          CPIdx, MachinePointerInfo::getConstantPool(),
                          VT, false, false, Alignment);
  return DAG.getLoad(OrigVT, dl, DAG.getEntryNode(), CPIdx,
                     MachinePointerInfo::getConstantPool(), false, false,
                     Alignment);
}
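// Example for ExpandConstantFP above: on a target where the f32->f64 EXTLOAD
// is legal and ShouldShrinkFPConstant(f64) returns true, a 'double 1.0'
// constant is emitted as an f32 constant-pool entry and reloaded with an
// extending load.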
/// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
static
SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
                             const TargetLowering &TLI) {
  SDValue Chain = ST->getChain();
  SDValue Ptr = ST->getBasePtr();
  SDValue Val = ST->getValue();
  EVT VT = Val.getValueType();
  int Alignment = ST->getAlignment();
  DebugLoc dl = ST->getDebugLoc();
  if (ST->getMemoryVT().isFloatingPoint() ||
      ST->getMemoryVT().isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a bitconvert of the value to the integer type of the
      // same size, then a (misaligned) int store.
      // FIXME: Does not handle truncating floating point stores!
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
      return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
                          ST->isVolatile(), ST->isNonTemporal(), Alignment);
    }
    // Do a (aligned) store to a stack slot, then copy from the stack slot
    // to the final destination using (unaligned) integer loads and stores.
    EVT StoredVT = ST->getMemoryVT();
    EVT RegVT =
      TLI.getRegisterType(*DAG.getContext(),
                          EVT::getIntegerVT(*DAG.getContext(),
                                            StoredVT.getSizeInBits()));
    unsigned StoredBytes = StoredVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackPtr = DAG.CreateStackTemporary(StoredVT, RegVT);

    // Perform the original store, only redirected to the stack slot.
    SDValue Store = DAG.getTruncStore(Chain, dl,
                                      Val, StackPtr, MachinePointerInfo(),
                                      StoredVT, false, false, 0);
    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the stack slot.
      SDValue Load = DAG.getLoad(RegVT, dl, Store, StackPtr,
                                 MachinePointerInfo(),
                                 false, false, 0);
      // Store it to the final location.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr,
                                    ST->getPointerInfo().getWithOffset(Offset),
                                    ST->isVolatile(), ST->isNonTemporal(),
                                    MinAlign(ST->getAlignment(), Offset)));
      // Increment the pointers.
      Offset += RegBytes;
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    }

    // The last store may be partial.  Do a truncating store.  On big-endian
    // machines this requires an extending load from the stack slot to ensure
    // that the bits are in the right place.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (StoredBytes - Offset));

    // Load from the stack slot.
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Store, StackPtr,
                                  MachinePointerInfo(),
                                  MemVT, false, false, 0);

    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr,
                                       ST->getPointerInfo()
                                         .getWithOffset(Offset),
                                       MemVT, ST->isVolatile(),
                                       ST->isNonTemporal(),
                                       MinAlign(ST->getAlignment(), Offset)));
    // The order of the stores doesn't matter - say it with a TokenFactor.
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                       Stores.size());
  }
  assert(ST->getMemoryVT().isInteger() &&
         !ST->getMemoryVT().isVector() &&
         "Unaligned store of unknown type.");
  // Get the half-size VT
  EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext());
  int NumBits = NewStoredVT.getSizeInBits();
  int IncrementSize = NumBits / 8;

  // Divide the stored value in two parts.
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                      TLI.getShiftAmountTy(Val.getValueType()));
  SDValue Lo = Val;
  SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount);

  // Store the two parts
  SDValue Store1, Store2;
  Store1 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Lo:Hi, Ptr,
                             ST->getPointerInfo(), NewStoredVT,
                             ST->isVolatile(), ST->isNonTemporal(), Alignment);
  Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                    DAG.getConstant(IncrementSize, TLI.getPointerTy()));
  Alignment = MinAlign(Alignment, IncrementSize);
  Store2 = DAG.getTruncStore(Chain, dl, TLI.isLittleEndian()?Hi:Lo, Ptr,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                             NewStoredVT, ST->isVolatile(), ST->isNonTemporal(),
                             Alignment);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}
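// Example for ExpandUnalignedStore above: on a little-endian target an
// unaligned i32 store becomes an i16 truncating store of the low half at Ptr
// and an i16 truncating store of the value shifted right by 16 at Ptr+2.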
/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
static
SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
                            const TargetLowering &TLI) {
  SDValue Chain = LD->getChain();
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0);
  EVT LoadedVT = LD->getMemoryVT();
  DebugLoc dl = LD->getDebugLoc();
  if (VT.isFloatingPoint() || VT.isVector()) {
    EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits());
    if (TLI.isTypeLegal(intVT)) {
      // Expand to a (misaligned) integer load of the same size,
      // then bitconvert to floating point or vector.
      SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
                                    LD->isVolatile(),
                                    LD->isNonTemporal(), LD->getAlignment());
      SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
      if (VT.isFloatingPoint() && LoadedVT != VT)
        Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);

      SDValue Ops[] = { Result, Chain };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // Copy the value to a (aligned) stack slot using (unaligned) integer
    // loads and stores, then do a (aligned) load from the stack slot.
    EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
    unsigned LoadedBytes = LoadedVT.getSizeInBits() / 8;
    unsigned RegBytes = RegVT.getSizeInBits() / 8;
    unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes;

    // Make sure the stack slot is also aligned for the register type.
    SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT);

    SDValue Increment = DAG.getConstant(RegBytes, TLI.getPointerTy());
    SmallVector<SDValue, 8> Stores;
    SDValue StackPtr = StackBase;
    unsigned Offset = 0;

    // Do all but one copies using the full register width.
    for (unsigned i = 1; i < NumRegs; i++) {
      // Load one integer register's worth from the original location.
      SDValue Load = DAG.getLoad(RegVT, dl, Chain, Ptr,
                                 LD->getPointerInfo().getWithOffset(Offset),
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 MinAlign(LD->getAlignment(), Offset));
      // Follow the load with a store to the stack slot.  Remember the store.
      Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, StackPtr,
                                    MachinePointerInfo(), false, false, 0));
      // Increment the pointers.
      Offset += RegBytes;
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
                             Increment);
    }

    // The last copy may be partial.  Do an extending load.
    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(),
                                  8 * (LoadedBytes - Offset));
    SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr,
                                  LD->getPointerInfo().getWithOffset(Offset),
                                  MemVT, LD->isVolatile(),
                                  LD->isNonTemporal(),
                                  MinAlign(LD->getAlignment(), Offset));
    // Follow the load with a store to the stack slot.  Remember the store.
    // On big-endian machines this requires a truncating store to ensure
    // that the bits end up in the right place.
    Stores.push_back(DAG.getTruncStore(Load.getValue(1), dl, Load, StackPtr,
                                       MachinePointerInfo(), MemVT,
                                       false, false, 0));

    // The order of the stores doesn't matter - say it with a TokenFactor.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
                             Stores.size());

    // Finally, perform the original load only redirected to the stack slot.
    Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase,
                          MachinePointerInfo(), LoadedVT, false, false, 0);

    // Callers expect a MERGE_VALUES node.
    SDValue Ops[] = { Load, TF };
    return DAG.getMergeValues(Ops, 2, dl);
  }
  assert(LoadedVT.isInteger() && !LoadedVT.isVector() &&
         "Unaligned load of unsupported type.");

  // Compute the new VT that is half the size of the old one.  This is an
  // integer MVT.
  unsigned NumBits = LoadedVT.getSizeInBits();
  EVT NewLoadedVT;
  NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2);
  NumBits >>= 1;

  unsigned Alignment = LD->getAlignment();
  unsigned IncrementSize = NumBits / 8;
  ISD::LoadExtType HiExtType = LD->getExtensionType();

  // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
  if (HiExtType == ISD::NON_EXTLOAD)
    HiExtType = ISD::ZEXTLOAD;

  // Load the value in two parts
  SDValue Lo, Hi;
  if (TLI.isLittleEndian()) {
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  } else {
    Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, TLI.getPointerTy()));
    Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr,
                        LD->getPointerInfo().getWithOffset(IncrementSize),
                        NewLoadedVT, LD->isVolatile(),
                        LD->isNonTemporal(), MinAlign(Alignment,IncrementSize));
  }

  // aggregate the two parts
  SDValue ShiftAmount = DAG.getConstant(NumBits,
                                       TLI.getShiftAmountTy(Hi.getValueType()));
  SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
  Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                           Hi.getValue(1));

  SDValue Ops[] = { Result, TF };
  return DAG.getMergeValues(Ops, 2, dl);
}
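// Example for ExpandUnalignedLoad above: an unaligned i32 load on a
// little-endian target becomes a zero-extending i16 load at Ptr and an i16
// load at Ptr+2 whose result is shifted left by 16 and OR'd with the low half.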
/// PerformInsertVectorEltInMemory - Some targets cannot handle a variable
/// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
/// is necessary to spill the vector being inserted into to memory, perform
/// the insert there, and then read the result back.
SDValue SelectionDAGLegalize::
PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx,
                               DebugLoc dl) {
  SDValue Tmp1 = Vec;
  SDValue Tmp2 = Val;
  SDValue Tmp3 = Idx;

  // If the target doesn't support this, we have to spill the input vector
  // to a temporary stack slot, update the element, then reload it.  This is
  // badness.  We could also load the value into a vector register (either
  // with a "move to register" or "extload into register" instruction, then
  // permute it into place), if the idx is a constant and if the idx is
  // supported by the target.
  EVT VT    = Tmp1.getValueType();
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = Tmp3.getValueType();
  EVT PtrVT = TLI.getPointerTy();
  SDValue StackPtr = DAG.CreateStackTemporary(VT);

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  // Store the vector.
  SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Tmp1, StackPtr,
                            MachinePointerInfo::getFixedStack(SPFI),
                            false, false, 0);

  // Truncate or zero extend offset to target pointer type.
  unsigned CastOpc = IdxVT.bitsGT(PtrVT) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  Tmp3 = DAG.getNode(CastOpc, dl, PtrVT, Tmp3);
  // Add the offset to the index.
  unsigned EltSize = EltVT.getSizeInBits()/8;
  Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
  // Store the scalar value.
  Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT,
                         false, false, 0);
  // Load the updated vector.
  return DAG.getLoad(VT, dl, Ch, StackPtr,
                     MachinePointerInfo::getFixedStack(SPFI), false, false, 0);
}
SDValue SelectionDAGLegalize::
ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val, SDValue Idx, DebugLoc dl) {
  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
    // SCALAR_TO_VECTOR requires that the type of the value being inserted
    // match the element type of the vector being created, except for
    // integers in which case the inserted value can be over width.
    EVT EltVT = Vec.getValueType().getVectorElementType();
    if (Val.getValueType() == EltVT ||
        (EltVT.isInteger() && Val.getValueType().bitsGE(EltVT))) {
      SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                  Vec.getValueType(), Val);

      unsigned NumElts = Vec.getValueType().getVectorNumElements();
      // We generate a shuffle of InVec and ScVec, so the shuffle mask
      // should be 0,1,2,3,4,5... with the appropriate element replaced with
      // elt 0 of the RHS.
      SmallVector<int, 8> ShufOps;
      for (unsigned i = 0; i != NumElts; ++i)
        ShufOps.push_back(i != InsertPos->getZExtValue() ? i : NumElts);

      return DAG.getVectorShuffle(Vec.getValueType(), dl, Vec, ScVec,
                                  &ShufOps[0]);
    }
  }
  return PerformInsertVectorEltInMemory(Vec, Val, Idx, dl);
}
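/// OptimizeFloatStore - Turn a store of a ConstantFP into an equivalent store
/// of its integer bit pattern when the corresponding integer type is legal.
/// Returns a null SDValue when no transformation applies.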
SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
  // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
  // FIXME: We shouldn't do this for TargetConstantFP's.
  // FIXME: move this to the DAG Combiner!  Note that we can't regress due
  // to phase ordering between legalized code and the dag combiner.  This
  // probably means that we need to integrate dag combiner and legalizer
  // together.
  // We generally can't do this one for long doubles.
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3;
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  DebugLoc dl = ST->getDebugLoc();
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
    if (CFP->getValueType(0) == MVT::f32 &&
        getTypeAction(MVT::i32) == Legal) {
      Tmp3 = DAG.getConstant(CFP->getValueAPF().
                               bitcastToAPInt().zextOrTrunc(32),
                             MVT::i32);
      return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                          isVolatile, isNonTemporal, Alignment);
    }

    if (CFP->getValueType(0) == MVT::f64) {
      // If this target supports 64-bit registers, do a single 64-bit store.
      if (getTypeAction(MVT::i64) == Legal) {
        Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
                                 zextOrTrunc(64), MVT::i64);
        return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                            isVolatile, isNonTemporal, Alignment);
      }

      if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
        // Otherwise, if the target supports 32-bit registers, use 2 32-bit
        // stores.  If the target supports neither 32- nor 64-bit stores, this
        // xform is certainly not worth it.
        const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt();
        SDValue Lo = DAG.getConstant(IntVal.trunc(32), MVT::i32);
        SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
        if (TLI.isBigEndian()) std::swap(Lo, Hi);

        Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getPointerInfo(), isVolatile,
                          isNonTemporal, Alignment);
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(4));
        Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2,
                          ST->getPointerInfo().getWithOffset(4),
                          isVolatile, isNonTemporal, MinAlign(Alignment, 4U));

        return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
      }
    }
  }
  return SDValue(0, 0);
}
/// LegalizeOp - We know that the specified value has a legal type, and
/// that its operands are legal.  Now ensure that the operation itself
/// is legal, recursively ensuring that the operands' operations remain
/// legal.
SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
  if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes.
    return Op;

  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();

  for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
    assert(getTypeAction(Node->getValueType(i)) == Legal &&
           "Unexpected illegal type!");

  for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
    assert((isTypeLegal(Node->getOperand(i).getValueType()) ||
            Node->getOperand(i).getOpcode() == ISD::TargetConstant) &&
           "Unexpected illegal type!");

  // Note that LegalizeOp may be reentered even from single-use nodes, which
  // means that we always must cache transformed nodes.
  DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
  if (I != LegalizedNodes.end()) return I->second;

  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  SDValue Result = Op;
  bool isCustom = false;

  // Figure out the correct action; the way to query this varies by opcode
  TargetLowering::LegalizeAction Action = TargetLowering::Legal;
  bool SimpleFinishLegalizing = true;
  switch (Node->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
  case ISD::VAARG:
  case ISD::STACKSAVE:
    Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
    break;
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
  case ISD::EXTRACT_VECTOR_ELT:
    Action = TLI.getOperationAction(Node->getOpcode(),
                                    Node->getOperand(0).getValueType());
    break;
  case ISD::FP_ROUND_INREG:
  case ISD::SIGN_EXTEND_INREG: {
    EVT InnerType = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Action = TLI.getOperationAction(Node->getOpcode(), InnerType);
    break;
  }
  case ISD::SELECT_CC:
  case ISD::SETCC:
  case ISD::BR_CC: {
    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 : 0;
    EVT OpVT = Node->getOperand(CompareOperand).getValueType();
    ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
    Action = TLI.getCondCodeAction(CCCode, OpVT);
    if (Action == TargetLowering::Legal) {
      if (Node->getOpcode() == ISD::SELECT_CC)
        Action = TLI.getOperationAction(Node->getOpcode(),
                                        Node->getValueType(0));
      else
        Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
    }
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
    // FIXME: Model these properly.  LOAD and STORE are complicated, and
    // STORE expects the unlegalized operand in some cases.
    SimpleFinishLegalizing = false;
    break;
  case ISD::CALLSEQ_START:
  case ISD::CALLSEQ_END:
    // FIXME: This shouldn't be necessary.  These nodes have special properties
    // dealing with the recursive nature of legalization.  Removing this
    // special case should be done as part of making LegalizeDAG non-recursive.
    SimpleFinishLegalizing = false;
    break;
  case ISD::EXTRACT_ELEMENT:
  case ISD::FLT_ROUNDS_:
  case ISD::SADDO:
  case ISD::SSUBO:
  case ISD::UADDO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
  case ISD::FPOWI:
  case ISD::MERGE_VALUES:
  case ISD::EH_RETURN:
  case ISD::FRAME_TO_ARGS_OFFSET:
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_DISPATCHSETUP:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be expanded.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Expand;
    break;
  case ISD::TRAMPOLINE:
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
    // These operations lie about being legal: when they claim to be legal,
    // they should actually be custom-lowered.
    Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    if (Action == TargetLowering::Legal)
      Action = TargetLowering::Custom;
    break;
  case ISD::BUILD_VECTOR:
    // A weird case: legalization for BUILD_VECTOR never legalizes the
    // operands!
    // FIXME: This really sucks... changing it isn't semantically incorrect,
    // but it massively pessimizes the code for floating-point BUILD_VECTORs
    // because ConstantFP operands get legalized into constant pool loads
    // before the BUILD_VECTOR code can see them.  It doesn't usually bite,
    // though, because BUILD_VECTORS usually get lowered into other nodes
    // which get legalized properly.
    SimpleFinishLegalizing = false;
    break;
  default:
    if (Node->getOpcode() >= ISD::BUILTIN_OP_END) {
      Action = TargetLowering::Legal;
    } else {
      Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
    }
    break;
  }

  if (SimpleFinishLegalizing) {
    SmallVector<SDValue, 8> Ops, ResultVals;
    for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i)
      Ops.push_back(LegalizeOp(Node->getOperand(i)));
    switch (Node->getOpcode()) {
    default: break;
    case ISD::BR:
    case ISD::BRIND:
    case ISD::BR_JT:
    case ISD::BR_CC:
    case ISD::BRCOND:
      assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?");
      // Branches tweak the chain to include LastCALLSEQ
      Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
                           getLastCALLSEQ());
      Ops[0] = LegalizeOp(Ops[0]);
      setLastCALLSEQ(DAG.getEntryNode());
      break;
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[1].getValueType().isVector())
        Ops[1] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[1]));
      break;
    case ISD::SRL_PARTS:
    case ISD::SRA_PARTS:
    case ISD::SHL_PARTS:
      // Legalizing shifts/rotates requires adjusting the shift amount
      // to the appropriate width.
      if (!Ops[2].getValueType().isVector())
        Ops[2] = LegalizeOp(DAG.getShiftAmountOperand(Ops[0].getValueType(),
                                                      Ops[2]));
      break;
    }

    Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), Ops.data(),
                                            Ops.size()), Result.getResNo());
    switch (Action) {
    case TargetLowering::Legal:
      for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
        ResultVals.push_back(Result.getValue(i));
      break;
    case TargetLowering::Custom:
      // FIXME: The handling for custom lowering with multiple results is
      // a complete mess.
      Tmp1 = TLI.LowerOperation(Result, DAG);
      if (Tmp1.getNode()) {
        for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) {
          if (e == 1)
            ResultVals.push_back(Tmp1);
          else
            ResultVals.push_back(Tmp1.getValue(i));
        }
        break;
      }

      // FALL THROUGH
    case TargetLowering::Expand:
      ExpandNode(Result.getNode(), ResultVals);
      break;
    case TargetLowering::Promote:
      PromoteNode(Result.getNode(), ResultVals);
      break;
    }
    if (!ResultVals.empty()) {
      for (unsigned i = 0, e = ResultVals.size(); i != e; ++i) {
        if (ResultVals[i] != SDValue(Node, i))
          ResultVals[i] = LegalizeOp(ResultVals[i]);
        AddLegalizedOperand(SDValue(Node, i), ResultVals[i]);
      }
      return ResultVals[Op.getResNo()];
    }
  }

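  // The remaining node kinds (loads, stores, call sequences and BUILD_VECTOR)
  // need opcode-specific handling below.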
  switch (Node->getOpcode()) {
  default:
#ifndef NDEBUG
    dbgs() << "NODE: ";
    Node->dump( &DAG);
    dbgs() << "\n";
#endif
    assert(0 && "Do not know how to legalize this operator!");

  case ISD::BUILD_VECTOR:
    switch (TLI.getOperationAction(ISD::BUILD_VECTOR, Node->getValueType(0))) {
    default: assert(0 && "This action is not supported yet!");
    case TargetLowering::Custom:
      Tmp3 = TLI.LowerOperation(Result, DAG);
      if (Tmp3.getNode()) {
        Result = Tmp3;
        break;
      }
      // FALLTHROUGH
    case TargetLowering::Expand:
      Result = ExpandBUILD_VECTOR(Result.getNode());
      break;
    }
    break;
  case ISD::CALLSEQ_START: {
    SDNode *CallEnd = FindCallEndFromCallStart(Node);
    assert(CallEnd && "didn't find CALLSEQ_END!");

    // Recursively Legalize all of the inputs of the call end that do not lead
    // to this call start.  This ensures that any libcalls that need be
    // inserted are inserted *before* the CALLSEQ_START.
    {SmallPtrSet<SDNode*, 32> NodesLeadingTo;
    for (unsigned i = 0, e = CallEnd->getNumOperands(); i != e; ++i)
      LegalizeAllNodesNotLeadingTo(CallEnd->getOperand(i).getNode(), Node,
                                   NodesLeadingTo);
    }

    // Now that we have legalized all of the inputs (which may have inserted
    // libcalls), create the new CALLSEQ_START node.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.

    // Merge in the last call to ensure that this call starts after the last
    // call ended.
    if (getLastCALLSEQ().getOpcode() != ISD::EntryToken) {
      Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         Tmp1, getLastCALLSEQ());
      Tmp1 = LegalizeOp(Tmp1);
    }

    // Do not try to legalize the target-specific arguments (#1+).
    if (Tmp1 != Node->getOperand(0)) {
      SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
      Ops[0] = Tmp1;
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(), &Ops[0],
                                              Ops.size()), Result.getResNo());
    }

    // Remember that the CALLSEQ_START is legalized.
    AddLegalizedOperand(Op.getValue(0), Result);
    if (Node->getNumValues() == 2)    // If this has a flag result, remember it.
      AddLegalizedOperand(Op.getValue(1), Result.getValue(1));

    // Now that the callseq_start and all of the non-call nodes above this call
    // sequence have been legalized, legalize the call itself.  During this
    // process, no libcalls can/will be inserted, guaranteeing that no calls
    // can overlap.
    // Note that we are selecting this call!
    setLastCALLSEQ(SDValue(CallEnd, 0));

    // Legalize the call, starting from the CALLSEQ_END.
    LegalizeOp(getLastCALLSEQ());
    return getLastCALLSEQ();
  }
  case ISD::CALLSEQ_END:
    {
      SDNode *myCALLSEQ_BEGIN = FindCallStartFromCallEnd(Node);

      // If the CALLSEQ_START node hasn't been legalized first, legalize it.
      // This will cause this node to be legalized as well as handling libcalls
      // right.
      if (getLastCALLSEQ().getNode() != Node) {
        LegalizeOp(SDValue(myCALLSEQ_BEGIN, 0));
        DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op);
        assert(I != LegalizedNodes.end() &&
               "Legalizing the call start should have legalized this node!");
        return I->second;
      }

      pushLastCALLSEQ(SDValue(myCALLSEQ_BEGIN, 0));
    }

    // Otherwise, the call start has been legalized and everything is going
    // according to plan. Just legalize ourselves normally here.
    Tmp1 = LegalizeOp(Node->getOperand(0));  // Legalize the chain.
    // Do not try to legalize the target-specific arguments (#1+), except for
    // an optional flag input.
    if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Glue){
      if (Tmp1 != Node->getOperand(0)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    } else {
      Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1));
      if (Tmp1 != Node->getOperand(0) ||
          Tmp2 != Node->getOperand(Node->getNumOperands()-1)) {
        SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
        Ops[0] = Tmp1;
        Ops.back() = Tmp2;
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                &Ops[0], Ops.size()),
                         Result.getResNo());
      }
    }
    // This finishes up call legalization.
    popLastCALLSEQ();

    // If the CALLSEQ_END node has a flag, remember that we legalized it.
    AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0));
    if (Node->getNumValues() == 2)
      AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1));
    return Result.getValue(Op.getResNo());
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Node);
    Tmp1 = LegalizeOp(LD->getChain());   // Legalize the chain.
    Tmp2 = LegalizeOp(LD->getBasePtr()); // Legalize the base pointer.

    ISD::LoadExtType ExtType = LD->getExtensionType();
    if (ExtType == ISD::NON_EXTLOAD) {
      EVT VT = Node->getValueType(0);
      Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                              Tmp1, Tmp2, LD->getOffset()),
                       Result.getResNo());
      Tmp3 = Result.getValue(0);
      Tmp4 = Result.getValue(1);

      switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Legal:
        // If this is an unaligned load and the target doesn't support it,
        // expand it.
        if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
          const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
          unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
          if (LD->getAlignment() < ABIAlignment){
            Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                         DAG, TLI);
            Tmp3 = Result.getOperand(0);
            Tmp4 = Result.getOperand(1);
            Tmp3 = LegalizeOp(Tmp3);
            Tmp4 = LegalizeOp(Tmp4);
          }
        }
        break;
      case TargetLowering::Custom:
        Tmp1 = TLI.LowerOperation(Tmp3, DAG);
        if (Tmp1.getNode()) {
          Tmp3 = LegalizeOp(Tmp1);
          Tmp4 = LegalizeOp(Tmp1.getValue(1));
        }
        break;
      case TargetLowering::Promote: {
        // Only promote a load of vector type to another.
        assert(VT.isVector() && "Cannot promote this load!");
        // Change base type to a different vector type.
        EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), VT);

        Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
                           LD->isVolatile(), LD->isNonTemporal(),
                           LD->getAlignment());
        Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
        Tmp4 = LegalizeOp(Tmp1.getValue(1));
        break;
      }
      }
      // Since loads produce two values, make sure to remember that we
      // legalized both of them.
      AddLegalizedOperand(SDValue(Node, 0), Tmp3);
      AddLegalizedOperand(SDValue(Node, 1), Tmp4);
      return Op.getResNo() ? Tmp4 : Tmp3;
    }

    EVT SrcVT = LD->getMemoryVT();
    unsigned SrcWidth = SrcVT.getSizeInBits();
    unsigned Alignment = LD->getAlignment();
    bool isVolatile = LD->isVolatile();
    bool isNonTemporal = LD->isNonTemporal();

    if (SrcWidth != SrcVT.getStoreSizeInBits() &&
        // Some targets pretend to have an i1 loading operation, and actually
        // load an i8.  This trick is correct for ZEXTLOAD because the top 7
        // bits are guaranteed to be zero; it helps the optimizers understand
        // that these bits are zero.  It is also useful for EXTLOAD, since it
        // tells the optimizers that those bits are undefined.  It would be
        // nice to have an effective generic way of getting these benefits...
        // Until such a way is found, don't insist on promoting i1 here.
        (SrcVT != MVT::i1 ||
         TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
      // Promote to a byte-sized load if not loading an integral number of
      // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
      unsigned NewWidth = SrcVT.getStoreSizeInBits();
      EVT NVT = EVT::getIntegerVT(*DAG.getContext(), NewWidth);
      SDValue Ch;

      // The extra bits are guaranteed to be zero, since we stored them that
      // way.  A zext load from NVT thus automatically gives zext from SrcVT.

      ISD::LoadExtType NewExtType =
        ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;

      Result = DAG.getExtLoad(NewExtType, dl, Node->getValueType(0),
                              Tmp1, Tmp2, LD->getPointerInfo(),
                              NVT, isVolatile, isNonTemporal, Alignment);

      Ch = Result.getValue(1); // The chain.

      if (ExtType == ISD::SEXTLOAD)
        // Having the top bits zero doesn't help when sign extending.
        Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                             Result.getValueType(),
                             Result, DAG.getValueType(SrcVT));
      else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
        // All the top bits are guaranteed to be zero - inform the optimizers.
        Result = DAG.getNode(ISD::AssertZext, dl,
                             Result.getValueType(), Result,
                             DAG.getValueType(SrcVT));

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else if (SrcWidth & (SrcWidth - 1)) {
      // If not loading a power-of-2 number of bits, expand as two loads.
      assert(!SrcVT.isVector() && "Unsupported extload!");
      unsigned RoundWidth = 1 << Log2_32(SrcWidth);
      assert(RoundWidth < SrcWidth);
      unsigned ExtraWidth = SrcWidth - RoundWidth;
      assert(ExtraWidth < RoundWidth);
      assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
             "Load size not an integral number of bytes!");
      EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
      EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
      SDValue Lo, Hi, Ch;
      unsigned IncrementSize;

      if (TLI.isLittleEndian()) {
        // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
        // Load the bottom RoundWidth bits.
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, Node->getValueType(0),
                            Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(RoundWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      } else {
        // Big endian - avoid unaligned loads.
        // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
        // Load the top RoundWidth bits.
        Hi = DAG.getExtLoad(ExtType, dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo(), RoundVT, isVolatile,
                            isNonTemporal, Alignment);

        // Load the remaining ExtraWidth bits.
        IncrementSize = RoundWidth / 8;
        Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                           DAG.getIntPtrConstant(IncrementSize));
        Lo = DAG.getExtLoad(ISD::ZEXTLOAD,
                            dl, Node->getValueType(0), Tmp1, Tmp2,
                            LD->getPointerInfo().getWithOffset(IncrementSize),
                            ExtraVT, isVolatile, isNonTemporal,
                            MinAlign(Alignment, IncrementSize));

        // Build a factor node to remember that this load is independent of
        // the other one.
        Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
                         Hi.getValue(1));

        // Move the top bits to the right place.
        Hi = DAG.getNode(ISD::SHL, dl, Hi.getValueType(), Hi,
                         DAG.getConstant(ExtraWidth,
                                      TLI.getShiftAmountTy(Hi.getValueType())));

        // Join the hi and lo parts.
        Result = DAG.getNode(ISD::OR, dl, Node->getValueType(0), Lo, Hi);
      }

      Tmp1 = LegalizeOp(Result);
      Tmp2 = LegalizeOp(Ch);
    } else {
      switch (TLI.getLoadExtAction(ExtType, SrcVT)) {
      default: assert(0 && "This action is not supported yet!");
      case TargetLowering::Custom:
        isCustom = true;
        // FALLTHROUGH
      case TargetLowering::Legal:
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp2, LD->getOffset()),
                         Result.getResNo());
        Tmp1 = Result.getValue(0);
        Tmp2 = Result.getValue(1);

        if (isCustom) {
          Tmp3 = TLI.LowerOperation(Result, DAG);
          if (Tmp3.getNode()) {
            Tmp1 = LegalizeOp(Tmp3);
            Tmp2 = LegalizeOp(Tmp3.getValue(1));
          }
        } else {
          // If this is an unaligned load and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
            const Type *Ty =
              LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment =
              TLI.getTargetData()->getABITypeAlignment(Ty);
            if (LD->getAlignment() < ABIAlignment){
              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
                                           DAG, TLI);
              Tmp1 = Result.getOperand(0);
              Tmp2 = Result.getOperand(1);
              Tmp1 = LegalizeOp(Tmp1);
              Tmp2 = LegalizeOp(Tmp2);
            }
          }
        }
        break;
      case TargetLowering::Expand:
        if (!TLI.isLoadExtLegal(ISD::EXTLOAD, SrcVT) && isTypeLegal(SrcVT)) {
          SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2,
                                     LD->getPointerInfo(),
                                     LD->isVolatile(), LD->isNonTemporal(),
                                     LD->getAlignment());
          unsigned ExtendOp;
          switch (ExtType) {
          case ISD::EXTLOAD:
            ExtendOp = (SrcVT.isFloatingPoint() ?
                        ISD::FP_EXTEND : ISD::ANY_EXTEND);
            break;
          case ISD::SEXTLOAD: ExtendOp = ISD::SIGN_EXTEND; break;
          case ISD::ZEXTLOAD: ExtendOp = ISD::ZERO_EXTEND; break;
          default: llvm_unreachable("Unexpected extend load type!");
          }
          Result = DAG.getNode(ExtendOp, dl, Node->getValueType(0), Load);
          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Load.getValue(1));
          break;
        }

        // If this is a promoted vector load, and the vector element types are
        // legal, then scalarize it.
        if (ExtType == ISD::EXTLOAD && SrcVT.isVector() &&
            isTypeLegal(Node->getValueType(0).getScalarType())) {
          SmallVector<SDValue, 8> LoadVals;
          SmallVector<SDValue, 8> LoadChains;
          unsigned NumElem = SrcVT.getVectorNumElements();
          unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;

          for (unsigned Idx=0; Idx<NumElem; Idx++) {
            Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                               DAG.getIntPtrConstant(Stride));
            SDValue ScalarLoad = DAG.getExtLoad(ISD::EXTLOAD, dl,
                  Node->getValueType(0).getScalarType(),
                  Tmp1, Tmp2, LD->getPointerInfo().getWithOffset(Idx * Stride),
                  SrcVT.getScalarType(),
                  LD->isVolatile(), LD->isNonTemporal(),
                  LD->getAlignment());

            LoadVals.push_back(ScalarLoad.getValue(0));
            LoadChains.push_back(ScalarLoad.getValue(1));
          }
          Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
            &LoadChains[0], LoadChains.size());
          SDValue ValRes = DAG.getNode(ISD::BUILD_VECTOR, dl,
            Node->getValueType(0), &LoadVals[0], LoadVals.size());

          Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Result.getValue(0));  // Relegalize new nodes.
          break;
        }

        // If this is a promoted vector load, and the vector element types are
        // illegal, create the promoted vector from bitcasted segments.
        if (ExtType == ISD::EXTLOAD && SrcVT.isVector()) {
          EVT MemElemTy = Node->getValueType(0).getScalarType();
          EVT SrcSclrTy = SrcVT.getScalarType();
          unsigned SizeRatio =
            (MemElemTy.getSizeInBits() / SrcSclrTy.getSizeInBits());

          SmallVector<SDValue, 8> LoadVals;
          SmallVector<SDValue, 8> LoadChains;
          unsigned NumElem = SrcVT.getVectorNumElements();
          unsigned Stride = SrcVT.getScalarType().getSizeInBits()/8;

          for (unsigned Idx=0; Idx<NumElem; Idx++) {
            Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                               DAG.getIntPtrConstant(Stride));
            SDValue ScalarLoad = DAG.getExtLoad(ISD::EXTLOAD, dl,
                  SrcVT.getScalarType(),
                  Tmp1, Tmp2, LD->getPointerInfo().getWithOffset(Idx * Stride),
                  SrcVT.getScalarType(),
                  LD->isVolatile(), LD->isNonTemporal(),
                  LD->getAlignment());
            if (TLI.isBigEndian()) {
              // MSB (which is garbage, comes first)
              LoadVals.push_back(ScalarLoad.getValue(0));
              for (unsigned i = 0; i<SizeRatio-1; ++i)
                LoadVals.push_back(DAG.getUNDEF(SrcVT.getScalarType()));
            } else {
              // LSB (which is data, comes first)
              for (unsigned i = 0; i<SizeRatio-1; ++i)
                LoadVals.push_back(DAG.getUNDEF(SrcVT.getScalarType()));
              LoadVals.push_back(ScalarLoad.getValue(0));
            }
            LoadChains.push_back(ScalarLoad.getValue(1));
          }

          Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
            &LoadChains[0], LoadChains.size());
          EVT TempWideVector = EVT::getVectorVT(*DAG.getContext(),
            SrcVT.getScalarType(), NumElem*SizeRatio);
          SDValue ValRes = DAG.getNode(ISD::BUILD_VECTOR, dl,
            TempWideVector, &LoadVals[0], LoadVals.size());

          // Cast to the correct type
          ValRes = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0), ValRes);

          Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
          Tmp2 = LegalizeOp(Result.getValue(0));  // Relegalize new nodes.
          break;
        }

        // FIXME: This does not work for vectors on most targets.  Sign- and
        // zero-extend operations are currently folded into extending loads,
        // whether they are legal or not, and then we end up here without any
        // support for legalizing them.
        assert(ExtType != ISD::EXTLOAD &&
               "EXTLOAD should always be supported!");
        // Turn the unsupported load into an EXTLOAD followed by an explicit
        // zero/sign extend inreg.
        Result = DAG.getExtLoad(ISD::EXTLOAD, dl, Node->getValueType(0),
                                Tmp1, Tmp2, LD->getPointerInfo(), SrcVT,
                                LD->isVolatile(), LD->isNonTemporal(),
                                LD->getAlignment());
        SDValue ValRes;
        if (ExtType == ISD::SEXTLOAD)
          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl,
                               Result.getValueType(),
                               Result, DAG.getValueType(SrcVT));
        else
          ValRes = DAG.getZeroExtendInReg(Result, dl, SrcVT.getScalarType());
        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
        break;
      }
    }

    // Since loads produce two values, make sure to remember that we legalized
    // both of them.
    AddLegalizedOperand(SDValue(Node, 0), Tmp1);
    AddLegalizedOperand(SDValue(Node, 1), Tmp2);
    return Op.getResNo() ? Tmp2 : Tmp1;
  }
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(Node);
    Tmp1 = LegalizeOp(ST->getChain());    // Legalize the chain.
    Tmp2 = LegalizeOp(ST->getBasePtr());  // Legalize the pointer.
    unsigned Alignment = ST->getAlignment();
    bool isVolatile = ST->isVolatile();
    bool isNonTemporal = ST->isNonTemporal();

    if (!ST->isTruncatingStore()) {
      if (SDNode *OptStore = OptimizeFloatStore(ST).getNode()) {
        Result = SDValue(OptStore, 0);
        break;
      }

      {
        Tmp3 = LegalizeOp(ST->getValue());
        Result = SDValue(DAG.UpdateNodeOperands(Result.getNode(),
                                                Tmp1, Tmp2, Tmp3,
                                                ST->getOffset()),
                         Result.getResNo());

        EVT VT = Tmp3.getValueType();
        switch (TLI.getOperationAction(ISD::STORE, VT)) {
        default: assert(0 && "This action is not supported yet!");
        case TargetLowering::Legal:
          // If this is an unaligned store and the target doesn't support it,
          // expand it.
          if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
            const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
            unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
            if (ST->getAlignment() < ABIAlignment)
              Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
                                            DAG, TLI);
          }
          break;
        case TargetLowering::Custom:
          Tmp1 = TLI.LowerOperation(Result, DAG);
          if (Tmp1.getNode()) Result = Tmp1;
          break;
        case TargetLowering::Promote:
          assert(VT.isVector() && "Unknown legal promote case!");
          Tmp3 = DAG.getNode(ISD::BITCAST, dl,
                             TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
          Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getPointerInfo(), isVolatile,
                                isNonTemporal, Alignment);
          break;
        }
        break;
      }
    } else {
      Tmp3 = LegalizeOp(ST->getValue());

      EVT StVT = ST->getMemoryVT();
      unsigned StWidth = StVT.getSizeInBits();

      if (StWidth != StVT.getStoreSizeInBits()) {
        // Promote to a byte-sized store with upper bits zero if not
        // storing an integral number of bytes.  For example, promote
        // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
        EVT NVT = EVT::getIntegerVT(*DAG.getContext(),
                                    StVT.getStoreSizeInBits());
        Tmp3 = DAG.getZeroExtendInReg(Tmp3, dl, StVT);
        Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                   NVT, isVolatile, isNonTemporal, Alignment);
      } else if (StWidth & (StWidth - 1)) {
        // If not storing a power-of-2 number of bits, expand as two stores.
        assert(!StVT.isVector() && "Unsupported truncstore!");
        unsigned RoundWidth = 1 << Log2_32(StWidth);
        assert(RoundWidth < StWidth);
        unsigned ExtraWidth = StWidth - RoundWidth;
        assert(ExtraWidth < RoundWidth);
        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
               "Store size not an integral number of bytes!");
        EVT RoundVT = EVT::getIntegerVT(*DAG.getContext(), RoundWidth);
        EVT ExtraVT = EVT::getIntegerVT(*DAG.getContext(), ExtraWidth);
        SDValue Lo, Hi;
        unsigned IncrementSize;

        if (TLI.isLittleEndian()) {
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
          // Store the bottom RoundWidth bits.
          Lo = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
                                 RoundVT,
                                 isVolatile, isNonTemporal, Alignment);

          // Store the remaining ExtraWidth bits.
          IncrementSize = RoundWidth / 8;
          Tmp2 = DAG.getNode(ISD::ADD, dl, Tmp2.getValueType(), Tmp2,
                             DAG.getIntPtrConstant(IncrementSize));
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(RoundWidth,
                                    TLI.getShiftAmountTy(Tmp3.getValueType())));
          Hi = DAG.getTruncStore(Tmp1, dl, Hi, Tmp2,
                             ST->getPointerInfo().getWithOffset(IncrementSize),
                                 ExtraVT, isVolatile, isNonTemporal,
                                 MinAlign(Alignment, IncrementSize));
        } else {
          // Big endian - avoid unaligned stores.
          // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
          // Store the top RoundWidth bits.
          Hi = DAG.getNode(ISD::SRL, dl, Tmp3.getValueType(), Tmp3,
                           DAG.getConstant(ExtraWidth,
                                    TLI.getShiftAmountTy(Tmp3
.getValueType())));
1597 Hi
= DAG
.getTruncStore(Tmp1
, dl
, Hi
, Tmp2
, ST
->getPointerInfo(),
1598 RoundVT
, isVolatile
, isNonTemporal
, Alignment
);
1600 // Store the remaining ExtraWidth bits.
1601 IncrementSize
= RoundWidth
/ 8;
1602 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1603 DAG
.getIntPtrConstant(IncrementSize
));
1604 Lo
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
,
1605 ST
->getPointerInfo().getWithOffset(IncrementSize
),
1606 ExtraVT
, isVolatile
, isNonTemporal
,
1607 MinAlign(Alignment
, IncrementSize
));
1610 // The order of the stores doesn't matter.
1611 Result
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
, Hi
);
1613 if (Tmp1
!= ST
->getChain() || Tmp3
!= ST
->getValue() ||
1614 Tmp2
!= ST
->getBasePtr())
1615 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1620 switch (TLI
.getTruncStoreAction(ST
->getValue().getValueType(), StVT
)) {
1621 default: assert(0 && "This action is not supported yet!");
1622 case TargetLowering::Legal
:
1623 // If this is an unaligned store and the target doesn't support it,
1625 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1626 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1627 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1628 if (ST
->getAlignment() < ABIAlignment
)
1629 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1633 case TargetLowering::Custom
:
1634 Result
= TLI
.LowerOperation(Result
, DAG
);
1638 EVT WideScalarVT
= Tmp3
.getValueType().getScalarType();
1639 EVT NarrowScalarVT
= StVT
.getScalarType();
1641 // The Store type is illegal, must scalarize the vector store.
1642 SmallVector
<SDValue
, 8> Stores
;
1643 bool ScalarLegal
= isTypeLegal(WideScalarVT
);
1644 if (!isTypeLegal(StVT
) && StVT
.isVector() && ScalarLegal
) {
1645 unsigned NumElem
= StVT
.getVectorNumElements();
1647 unsigned ScalarSize
= StVT
.getScalarType().getSizeInBits();
1648 // Round odd types to the next pow of two.
1649 if (!isPowerOf2_32(ScalarSize
))
1650 ScalarSize
= NextPowerOf2(ScalarSize
);
1651 // Types smaller than 8 bits are promoted to 8 bits.
1652 ScalarSize
= std::max
<unsigned>(ScalarSize
, 8);
1654 unsigned Stride
= ScalarSize
/8;
1655 assert(isPowerOf2_32(Stride
) && "Stride must be a power of two");
1657 for (unsigned Idx
=0; Idx
<NumElem
; Idx
++) {
1658 SDValue Ex
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
,
1659 WideScalarVT
, Tmp3
, DAG
.getIntPtrConstant(Idx
));
1662 EVT NVT
= EVT::getIntegerVT(*DAG
.getContext(), ScalarSize
);
1664 Ex
= DAG
.getNode(ISD::TRUNCATE
, dl
, NVT
, Ex
);
1665 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1666 DAG
.getIntPtrConstant(Stride
));
1667 SDValue Store
= DAG
.getStore(Tmp1
, dl
, Ex
, Tmp2
,
1668 ST
->getPointerInfo().getWithOffset(Idx
*Stride
),
1669 isVolatile
, isNonTemporal
, Alignment
);
1670 Stores
.push_back(Store
);
1672 Result
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1673 &Stores
[0], Stores
.size());
1677 // The Store type is illegal, must scalarize the vector store.
1678 // However, the scalar type is illegal. Must bitcast the result
1679 // and store it in smaller parts.
1680 if (!isTypeLegal(StVT
) && StVT
.isVector()) {
1681 unsigned WideNumElem
= StVT
.getVectorNumElements();
1682 unsigned Stride
= NarrowScalarVT
.getSizeInBits()/8;
1684 unsigned SizeRatio
=
1685 (WideScalarVT
.getSizeInBits() / NarrowScalarVT
.getSizeInBits());
1687 EVT CastValueVT
= EVT::getVectorVT(*DAG
.getContext(), NarrowScalarVT
,
1688 SizeRatio
*WideNumElem
);
1690 // Cast the wide elem vector to wider vec with smaller elem type.
1691 // Example <2 x i64> -> <4 x i32>
1692 Tmp3
= DAG
.getNode(ISD::BITCAST
, dl
, CastValueVT
, Tmp3
);
1694 for (unsigned Idx
=0; Idx
<WideNumElem
*SizeRatio
; Idx
++) {
1696 SDValue Ex
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
,
1697 NarrowScalarVT
, Tmp3
, DAG
.getIntPtrConstant(Idx
));
1699 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1700 DAG
.getIntPtrConstant(Stride
));
1702 // Store if, this element is:
1703 // - First element on big endian, or
1704 // - Last element on little endian
1705 if (( TLI
.isBigEndian() && (Idx
%SizeRatio
== 0)) ||
1706 ((!TLI
.isBigEndian() && (Idx
%SizeRatio
== SizeRatio
-1)))) {
1707 SDValue Store
= DAG
.getStore(Tmp1
, dl
, Ex
, Tmp2
,
1708 ST
->getPointerInfo().getWithOffset(Idx
*Stride
),
1709 isVolatile
, isNonTemporal
, Alignment
);
1710 Stores
.push_back(Store
);
1713 Result
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1714 &Stores
[0], Stores
.size());
1719 // TRUNCSTORE:i16 i32 -> STORE i16
1720 assert(isTypeLegal(StVT
) && "Do not know how to expand this store!");
1721 Tmp3
= DAG
.getNode(ISD::TRUNCATE
, dl
, StVT
, Tmp3
);
1722 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getPointerInfo(),
1723 isVolatile
, isNonTemporal
, Alignment
);
1731 assert(Result
.getValueType() == Op
.getValueType() &&
1732 "Bad legalization!");
1734 // Make sure that the generated code is itself legal.
1736 Result
= LegalizeOp(Result
);
1738 // Note that LegalizeOp may be reentered even from single-use nodes, which
1739 // means that we always must cache transformed nodes.
1740 AddLegalizedOperand(Op
, Result
);
1744 SDValue
SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op
) {
1745 SDValue Vec
= Op
.getOperand(0);
1746 SDValue Idx
= Op
.getOperand(1);
1747 DebugLoc dl
= Op
.getDebugLoc();
1748 // Store the value to a temporary stack slot, then LOAD the returned part.
1749 SDValue StackPtr
= DAG
.CreateStackTemporary(Vec
.getValueType());
1750 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Vec
, StackPtr
,
1751 MachinePointerInfo(), false, false, 0);
1753 // Add the offset to the index.
1755 Vec
.getValueType().getVectorElementType().getSizeInBits()/8;
1756 Idx
= DAG
.getNode(ISD::MUL
, dl
, Idx
.getValueType(), Idx
,
1757 DAG
.getConstant(EltSize
, Idx
.getValueType()));
1759 if (Idx
.getValueType().bitsGT(TLI
.getPointerTy()))
1760 Idx
= DAG
.getNode(ISD::TRUNCATE
, dl
, TLI
.getPointerTy(), Idx
);
1762 Idx
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, TLI
.getPointerTy(), Idx
);
1764 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, Idx
.getValueType(), Idx
, StackPtr
);
1766 if (Op
.getValueType().isVector())
1767 return DAG
.getLoad(Op
.getValueType(), dl
, Ch
, StackPtr
,MachinePointerInfo(),
1769 return DAG
.getExtLoad(ISD::EXTLOAD
, dl
, Op
.getValueType(), Ch
, StackPtr
,
1770 MachinePointerInfo(),
1771 Vec
.getValueType().getVectorElementType(),
1775 SDValue
SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op
) {
1776 assert(Op
.getValueType().isVector() && "Non-vector insert subvector!");
1778 SDValue Vec
= Op
.getOperand(0);
1779 SDValue Part
= Op
.getOperand(1);
1780 SDValue Idx
= Op
.getOperand(2);
1781 DebugLoc dl
= Op
.getDebugLoc();
1783 // Store the value to a temporary stack slot, then LOAD the returned part.
1785 SDValue StackPtr
= DAG
.CreateStackTemporary(Vec
.getValueType());
1786 int FI
= cast
<FrameIndexSDNode
>(StackPtr
.getNode())->getIndex();
1787 MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(FI
);
1789 // First store the whole vector.
1790 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Vec
, StackPtr
, PtrInfo
,
1793 // Then store the inserted part.
1795 // Add the offset to the index.
1797 Vec
.getValueType().getVectorElementType().getSizeInBits()/8;
1799 Idx
= DAG
.getNode(ISD::MUL
, dl
, Idx
.getValueType(), Idx
,
1800 DAG
.getConstant(EltSize
, Idx
.getValueType()));
1802 if (Idx
.getValueType().bitsGT(TLI
.getPointerTy()))
1803 Idx
= DAG
.getNode(ISD::TRUNCATE
, dl
, TLI
.getPointerTy(), Idx
);
1805 Idx
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, TLI
.getPointerTy(), Idx
);
1807 SDValue SubStackPtr
= DAG
.getNode(ISD::ADD
, dl
, Idx
.getValueType(), Idx
,
1810 // Store the subvector.
1811 Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Part
, SubStackPtr
,
1812 MachinePointerInfo(), false, false, 0);
1814 // Finally, load the updated vector.
1815 return DAG
.getLoad(Op
.getValueType(), dl
, Ch
, StackPtr
, PtrInfo
,
1819 SDValue
SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode
* Node
) {
1820 // We can't handle this case efficiently. Allocate a sufficiently
1821 // aligned object on the stack, store each element into it, then load
1822 // the result as a vector.
1823 // Create the stack frame object.
1824 EVT VT
= Node
->getValueType(0);
1825 EVT EltVT
= VT
.getVectorElementType();
1826 DebugLoc dl
= Node
->getDebugLoc();
1827 SDValue FIPtr
= DAG
.CreateStackTemporary(VT
);
1828 int FI
= cast
<FrameIndexSDNode
>(FIPtr
.getNode())->getIndex();
1829 MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(FI
);
1831 // Emit a store of each element to the stack slot.
1832 SmallVector
<SDValue
, 8> Stores
;
1833 unsigned TypeByteSize
= EltVT
.getSizeInBits() / 8;
1834 // Store (in the right endianness) the elements to memory.
1835 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1836 // Ignore undef elements.
1837 if (Node
->getOperand(i
).getOpcode() == ISD::UNDEF
) continue;
1839 unsigned Offset
= TypeByteSize
*i
;
1841 SDValue Idx
= DAG
.getConstant(Offset
, FIPtr
.getValueType());
1842 Idx
= DAG
.getNode(ISD::ADD
, dl
, FIPtr
.getValueType(), FIPtr
, Idx
);
1844 // If the destination vector element type is narrower than the source
1845 // element type, only store the bits necessary.
1846 if (EltVT
.bitsLT(Node
->getOperand(i
).getValueType().getScalarType())) {
1847 Stores
.push_back(DAG
.getTruncStore(DAG
.getEntryNode(), dl
,
1848 Node
->getOperand(i
), Idx
,
1849 PtrInfo
.getWithOffset(Offset
),
1850 EltVT
, false, false, 0));
1852 Stores
.push_back(DAG
.getStore(DAG
.getEntryNode(), dl
,
1853 Node
->getOperand(i
), Idx
,
1854 PtrInfo
.getWithOffset(Offset
),
1859 if (!Stores
.empty()) // Not all undef elements?
1860 StoreChain
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1861 &Stores
[0], Stores
.size());
1863 StoreChain
= DAG
.getEntryNode();
1865 // Result is a load from the stack slot.
1866 return DAG
.getLoad(VT
, dl
, StoreChain
, FIPtr
, PtrInfo
, false, false, 0);
1869 SDValue
SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode
* Node
) {
1870 DebugLoc dl
= Node
->getDebugLoc();
1871 SDValue Tmp1
= Node
->getOperand(0);
1872 SDValue Tmp2
= Node
->getOperand(1);
1874 // Get the sign bit of the RHS. First obtain a value that has the same
1875 // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
1877 EVT FloatVT
= Tmp2
.getValueType();
1878 EVT IVT
= EVT::getIntegerVT(*DAG
.getContext(), FloatVT
.getSizeInBits());
1879 if (isTypeLegal(IVT
)) {
1880 // Convert to an integer with the same sign bit.
1881 SignBit
= DAG
.getNode(ISD::BITCAST
, dl
, IVT
, Tmp2
);
1883 // Store the float to memory, then load the sign part out as an integer.
1884 MVT LoadTy
= TLI
.getPointerTy();
1885 // First create a temporary that is aligned for both the load and store.
1886 SDValue StackPtr
= DAG
.CreateStackTemporary(FloatVT
, LoadTy
);
1887 // Then store the float to it.
1889 DAG
.getStore(DAG
.getEntryNode(), dl
, Tmp2
, StackPtr
, MachinePointerInfo(),
1891 if (TLI
.isBigEndian()) {
1892 assert(FloatVT
.isByteSized() && "Unsupported floating point type!");
1893 // Load out a legal integer with the same sign bit as the float.
1894 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, StackPtr
, MachinePointerInfo(),
1896 } else { // Little endian
1897 SDValue LoadPtr
= StackPtr
;
1898 // The float may be wider than the integer we are going to load. Advance
1899 // the pointer so that the loaded integer will contain the sign bit.
1900 unsigned Strides
= (FloatVT
.getSizeInBits()-1)/LoadTy
.getSizeInBits();
1901 unsigned ByteOffset
= (Strides
* LoadTy
.getSizeInBits()) / 8;
1902 LoadPtr
= DAG
.getNode(ISD::ADD
, dl
, LoadPtr
.getValueType(),
1903 LoadPtr
, DAG
.getIntPtrConstant(ByteOffset
));
1904 // Load a legal integer containing the sign bit.
1905 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, LoadPtr
, MachinePointerInfo(),
1907 // Move the sign bit to the top bit of the loaded integer.
1908 unsigned BitShift
= LoadTy
.getSizeInBits() -
1909 (FloatVT
.getSizeInBits() - 8 * ByteOffset
);
1910 assert(BitShift
< LoadTy
.getSizeInBits() && "Pointer advanced wrong?");
1912 SignBit
= DAG
.getNode(ISD::SHL
, dl
, LoadTy
, SignBit
,
1913 DAG
.getConstant(BitShift
,
1914 TLI
.getShiftAmountTy(SignBit
.getValueType())));
1917 // Now get the sign bit proper, by seeing whether the value is negative.
1918 SignBit
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(SignBit
.getValueType()),
1919 SignBit
, DAG
.getConstant(0, SignBit
.getValueType()),
1921 // Get the absolute value of the result.
1922 SDValue AbsVal
= DAG
.getNode(ISD::FABS
, dl
, Tmp1
.getValueType(), Tmp1
);
1923 // Select between the nabs and abs value based on the sign bit of
1925 return DAG
.getNode(ISD::SELECT
, dl
, AbsVal
.getValueType(), SignBit
,
1926 DAG
.getNode(ISD::FNEG
, dl
, AbsVal
.getValueType(), AbsVal
),
1930 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode
* Node
,
1931 SmallVectorImpl
<SDValue
> &Results
) {
1932 unsigned SPReg
= TLI
.getStackPointerRegisterToSaveRestore();
1933 assert(SPReg
&& "Target cannot require DYNAMIC_STACKALLOC expansion and"
1934 " not tell us which reg is the stack pointer!");
1935 DebugLoc dl
= Node
->getDebugLoc();
1936 EVT VT
= Node
->getValueType(0);
1937 SDValue Tmp1
= SDValue(Node
, 0);
1938 SDValue Tmp2
= SDValue(Node
, 1);
1939 SDValue Tmp3
= Node
->getOperand(2);
1940 SDValue Chain
= Tmp1
.getOperand(0);
1942 // Chain the dynamic stack allocation so that it doesn't modify the stack
1943 // pointer when other instructions are using the stack.
1944 Chain
= DAG
.getCALLSEQ_START(Chain
, DAG
.getIntPtrConstant(0, true));
1946 SDValue Size
= Tmp2
.getOperand(1);
1947 SDValue SP
= DAG
.getCopyFromReg(Chain
, dl
, SPReg
, VT
);
1948 Chain
= SP
.getValue(1);
1949 unsigned Align
= cast
<ConstantSDNode
>(Tmp3
)->getZExtValue();
1950 unsigned StackAlign
= TM
.getFrameLowering()->getStackAlignment();
1951 if (Align
> StackAlign
)
1952 SP
= DAG
.getNode(ISD::AND
, dl
, VT
, SP
,
1953 DAG
.getConstant(-(uint64_t)Align
, VT
));
1954 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, VT
, SP
, Size
); // Value
1955 Chain
= DAG
.getCopyToReg(Chain
, dl
, SPReg
, Tmp1
); // Output chain
1957 Tmp2
= DAG
.getCALLSEQ_END(Chain
, DAG
.getIntPtrConstant(0, true),
1958 DAG
.getIntPtrConstant(0, true), SDValue());
1960 Results
.push_back(Tmp1
);
1961 Results
.push_back(Tmp2
);
1964 /// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
1965 /// condition code CC on the current target. This routine expands SETCC with
1966 /// illegal condition code into AND / OR of multiple SETCC values.
1967 void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT
,
1968 SDValue
&LHS
, SDValue
&RHS
,
1971 EVT OpVT
= LHS
.getValueType();
1972 ISD::CondCode CCCode
= cast
<CondCodeSDNode
>(CC
)->get();
1973 switch (TLI
.getCondCodeAction(CCCode
, OpVT
)) {
1974 default: assert(0 && "Unknown condition code action!");
1975 case TargetLowering::Legal
:
1978 case TargetLowering::Expand
: {
1979 ISD::CondCode CC1
= ISD::SETCC_INVALID
, CC2
= ISD::SETCC_INVALID
;
1982 default: assert(0 && "Don't know how to expand this condition!");
1983 case ISD::SETOEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1984 case ISD::SETOGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1985 case ISD::SETOGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1986 case ISD::SETOLT
: CC1
= ISD::SETLT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1987 case ISD::SETOLE
: CC1
= ISD::SETLE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1988 case ISD::SETONE
: CC1
= ISD::SETNE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1989 case ISD::SETUEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1990 case ISD::SETUGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1991 case ISD::SETUGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1992 case ISD::SETULT
: CC1
= ISD::SETLT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1993 case ISD::SETULE
: CC1
= ISD::SETLE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1994 case ISD::SETUNE
: CC1
= ISD::SETNE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1995 // FIXME: Implement more expansions.
1998 SDValue SetCC1
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC1
);
1999 SDValue SetCC2
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC2
);
2000 LHS
= DAG
.getNode(Opc
, dl
, VT
, SetCC1
, SetCC2
);
2008 /// EmitStackConvert - Emit a store/load combination to the stack. This stores
2009 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
2010 /// a load from the stack slot to DestVT, extending it if needed.
2011 /// The resultant code need not be legal.
2012 SDValue
SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp
,
2016 // Create the stack frame object.
2018 TLI
.getTargetData()->getPrefTypeAlignment(SrcOp
.getValueType().
2019 getTypeForEVT(*DAG
.getContext()));
2020 SDValue FIPtr
= DAG
.CreateStackTemporary(SlotVT
, SrcAlign
);
2022 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(FIPtr
);
2023 int SPFI
= StackPtrFI
->getIndex();
2024 MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(SPFI
);
2026 unsigned SrcSize
= SrcOp
.getValueType().getSizeInBits();
2027 unsigned SlotSize
= SlotVT
.getSizeInBits();
2028 unsigned DestSize
= DestVT
.getSizeInBits();
2029 const Type
*DestType
= DestVT
.getTypeForEVT(*DAG
.getContext());
2030 unsigned DestAlign
= TLI
.getTargetData()->getPrefTypeAlignment(DestType
);
2032 // Emit a store to the stack slot. Use a truncstore if the input value is
2033 // later than DestVT.
2036 if (SrcSize
> SlotSize
)
2037 Store
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
2038 PtrInfo
, SlotVT
, false, false, SrcAlign
);
2040 assert(SrcSize
== SlotSize
&& "Invalid store");
2041 Store
= DAG
.getStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
2042 PtrInfo
, false, false, SrcAlign
);
2045 // Result is a load from the stack slot.
2046 if (SlotSize
== DestSize
)
2047 return DAG
.getLoad(DestVT
, dl
, Store
, FIPtr
, PtrInfo
,
2048 false, false, DestAlign
);
2050 assert(SlotSize
< DestSize
&& "Unknown extension!");
2051 return DAG
.getExtLoad(ISD::EXTLOAD
, dl
, DestVT
, Store
, FIPtr
,
2052 PtrInfo
, SlotVT
, false, false, DestAlign
);
2055 SDValue
SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode
*Node
) {
2056 DebugLoc dl
= Node
->getDebugLoc();
2057 // Create a vector sized/aligned stack slot, store the value to element #0,
2058 // then load the whole vector back out.
2059 SDValue StackPtr
= DAG
.CreateStackTemporary(Node
->getValueType(0));
2061 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(StackPtr
);
2062 int SPFI
= StackPtrFI
->getIndex();
2064 SDValue Ch
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, Node
->getOperand(0),
2066 MachinePointerInfo::getFixedStack(SPFI
),
2067 Node
->getValueType(0).getVectorElementType(),
2069 return DAG
.getLoad(Node
->getValueType(0), dl
, Ch
, StackPtr
,
2070 MachinePointerInfo::getFixedStack(SPFI
),
2075 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
2076 /// support the operation, but do support the resultant vector type.
2077 SDValue
SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode
*Node
) {
2078 unsigned NumElems
= Node
->getNumOperands();
2079 SDValue Value1
, Value2
;
2080 DebugLoc dl
= Node
->getDebugLoc();
2081 EVT VT
= Node
->getValueType(0);
2082 EVT OpVT
= Node
->getOperand(0).getValueType();
2083 EVT EltVT
= VT
.getVectorElementType();
2085 // If the only non-undef value is the low element, turn this into a
2086 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
2087 bool isOnlyLowElement
= true;
2088 bool MoreThanTwoValues
= false;
2089 bool isConstant
= true;
2090 for (unsigned i
= 0; i
< NumElems
; ++i
) {
2091 SDValue V
= Node
->getOperand(i
);
2092 if (V
.getOpcode() == ISD::UNDEF
)
2095 isOnlyLowElement
= false;
2096 if (!isa
<ConstantFPSDNode
>(V
) && !isa
<ConstantSDNode
>(V
))
2099 if (!Value1
.getNode()) {
2101 } else if (!Value2
.getNode()) {
2104 } else if (V
!= Value1
&& V
!= Value2
) {
2105 MoreThanTwoValues
= true;
2109 if (!Value1
.getNode())
2110 return DAG
.getUNDEF(VT
);
2112 if (isOnlyLowElement
)
2113 return DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Node
->getOperand(0));
2115 // If all elements are constants, create a load from the constant pool.
2117 std::vector
<Constant
*> CV
;
2118 for (unsigned i
= 0, e
= NumElems
; i
!= e
; ++i
) {
2119 if (ConstantFPSDNode
*V
=
2120 dyn_cast
<ConstantFPSDNode
>(Node
->getOperand(i
))) {
2121 CV
.push_back(const_cast<ConstantFP
*>(V
->getConstantFPValue()));
2122 } else if (ConstantSDNode
*V
=
2123 dyn_cast
<ConstantSDNode
>(Node
->getOperand(i
))) {
2125 CV
.push_back(const_cast<ConstantInt
*>(V
->getConstantIntValue()));
2127 // If OpVT and EltVT don't match, EltVT is not legal and the
2128 // element values have been promoted/truncated earlier. Undo this;
2129 // we don't want a v16i8 to become a v16i32 for example.
2130 const ConstantInt
*CI
= V
->getConstantIntValue();
2131 CV
.push_back(ConstantInt::get(EltVT
.getTypeForEVT(*DAG
.getContext()),
2132 CI
->getZExtValue()));
2135 assert(Node
->getOperand(i
).getOpcode() == ISD::UNDEF
);
2136 const Type
*OpNTy
= EltVT
.getTypeForEVT(*DAG
.getContext());
2137 CV
.push_back(UndefValue::get(OpNTy
));
2140 Constant
*CP
= ConstantVector::get(CV
);
2141 SDValue CPIdx
= DAG
.getConstantPool(CP
, TLI
.getPointerTy());
2142 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
2143 return DAG
.getLoad(VT
, dl
, DAG
.getEntryNode(), CPIdx
,
2144 MachinePointerInfo::getConstantPool(),
2145 false, false, Alignment
);
2148 if (!MoreThanTwoValues
) {
2149 SmallVector
<int, 8> ShuffleVec(NumElems
, -1);
2150 for (unsigned i
= 0; i
< NumElems
; ++i
) {
2151 SDValue V
= Node
->getOperand(i
);
2152 if (V
.getOpcode() == ISD::UNDEF
)
2154 ShuffleVec
[i
] = V
== Value1
? 0 : NumElems
;
2156 if (TLI
.isShuffleMaskLegal(ShuffleVec
, Node
->getValueType(0))) {
2157 // Get the splatted value into the low element of a vector register.
2158 SDValue Vec1
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value1
);
2160 if (Value2
.getNode())
2161 Vec2
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value2
);
2163 Vec2
= DAG
.getUNDEF(VT
);
2165 // Return shuffle(LowValVec, undef, <0,0,0,0>)
2166 return DAG
.getVectorShuffle(VT
, dl
, Vec1
, Vec2
, ShuffleVec
.data());
2170 // Otherwise, we can't handle this case efficiently.
2171 return ExpandVectorBuildThroughStack(Node
);
2174 // ExpandLibCall - Expand a node into a call to a libcall. If the result value
2175 // does not fit into a register, return the lo part and set the hi part to the
2176 // by-reg argument. If it does fit into a single register, return the result
2177 // and leave the Hi part unset.
2178 SDValue
SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC
, SDNode
*Node
,
2180 // The input chain to this libcall is the entry node of the function.
2181 // Legalizing the call will automatically add the previous call to the
2183 SDValue InChain
= DAG
.getEntryNode();
2185 TargetLowering::ArgListTy Args
;
2186 TargetLowering::ArgListEntry Entry
;
2187 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
2188 EVT ArgVT
= Node
->getOperand(i
).getValueType();
2189 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
2190 Entry
.Node
= Node
->getOperand(i
); Entry
.Ty
= ArgTy
;
2191 Entry
.isSExt
= isSigned
;
2192 Entry
.isZExt
= !isSigned
;
2193 Args
.push_back(Entry
);
2195 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
2196 TLI
.getPointerTy());
2198 // Splice the libcall in wherever FindInputOutputChains tells us to.
2199 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
2201 // isTailCall may be true since the callee does not reference caller stack
2202 // frame. Check if it's in the right position.
2203 bool isTailCall
= isInTailCallPosition(DAG
, Node
, TLI
);
2204 std::pair
<SDValue
, SDValue
> CallInfo
=
2205 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
2206 0, TLI
.getLibcallCallingConv(LC
), isTailCall
,
2207 /*isReturnValueUsed=*/true,
2208 Callee
, Args
, DAG
, Node
->getDebugLoc());
2210 if (!CallInfo
.second
.getNode())
2211 // It's a tailcall, return the chain (which is the DAG root).
2212 return DAG
.getRoot();
2214 // Legalize the call sequence, starting with the chain. This will advance
2215 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that
2216 // was added by LowerCallTo (guaranteeing proper serialization of calls).
2217 LegalizeOp(CallInfo
.second
);
2218 return CallInfo
.first
;
2221 /// ExpandLibCall - Generate a libcall taking the given operands as arguments
2222 /// and returning a result of type RetVT.
2223 SDValue
SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC
, EVT RetVT
,
2224 const SDValue
*Ops
, unsigned NumOps
,
2225 bool isSigned
, DebugLoc dl
) {
2226 TargetLowering::ArgListTy Args
;
2227 Args
.reserve(NumOps
);
2229 TargetLowering::ArgListEntry Entry
;
2230 for (unsigned i
= 0; i
!= NumOps
; ++i
) {
2231 Entry
.Node
= Ops
[i
];
2232 Entry
.Ty
= Entry
.Node
.getValueType().getTypeForEVT(*DAG
.getContext());
2233 Entry
.isSExt
= isSigned
;
2234 Entry
.isZExt
= !isSigned
;
2235 Args
.push_back(Entry
);
2237 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
2238 TLI
.getPointerTy());
2240 const Type
*RetTy
= RetVT
.getTypeForEVT(*DAG
.getContext());
2241 std::pair
<SDValue
,SDValue
> CallInfo
=
2242 TLI
.LowerCallTo(DAG
.getEntryNode(), RetTy
, isSigned
, !isSigned
, false,
2243 false, 0, TLI
.getLibcallCallingConv(LC
), false,
2244 /*isReturnValueUsed=*/true,
2245 Callee
, Args
, DAG
, dl
);
2247 // Legalize the call sequence, starting with the chain. This will advance
2248 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
2249 // was added by LowerCallTo (guaranteeing proper serialization of calls).
2250 LegalizeOp(CallInfo
.second
);
2252 return CallInfo
.first
;
2255 // ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
2256 // ExpandLibCall except that the first operand is the in-chain.
2257 std::pair
<SDValue
, SDValue
>
2258 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC
,
2261 SDValue InChain
= Node
->getOperand(0);
2263 TargetLowering::ArgListTy Args
;
2264 TargetLowering::ArgListEntry Entry
;
2265 for (unsigned i
= 1, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
2266 EVT ArgVT
= Node
->getOperand(i
).getValueType();
2267 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
2268 Entry
.Node
= Node
->getOperand(i
);
2270 Entry
.isSExt
= isSigned
;
2271 Entry
.isZExt
= !isSigned
;
2272 Args
.push_back(Entry
);
2274 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
2275 TLI
.getPointerTy());
2277 // Splice the libcall in wherever FindInputOutputChains tells us to.
2278 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
2279 std::pair
<SDValue
, SDValue
> CallInfo
=
2280 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
2281 0, TLI
.getLibcallCallingConv(LC
), /*isTailCall=*/false,
2282 /*isReturnValueUsed=*/true,
2283 Callee
, Args
, DAG
, Node
->getDebugLoc());
2285 // Legalize the call sequence, starting with the chain. This will advance
2286 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that
2287 // was added by LowerCallTo (guaranteeing proper serialization of calls).
2288 LegalizeOp(CallInfo
.second
);
2292 SDValue
SelectionDAGLegalize::ExpandFPLibCall(SDNode
* Node
,
2293 RTLIB::Libcall Call_F32
,
2294 RTLIB::Libcall Call_F64
,
2295 RTLIB::Libcall Call_F80
,
2296 RTLIB::Libcall Call_PPCF128
) {
2298 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2299 default: assert(0 && "Unexpected request for libcall!");
2300 case MVT::f32
: LC
= Call_F32
; break;
2301 case MVT::f64
: LC
= Call_F64
; break;
2302 case MVT::f80
: LC
= Call_F80
; break;
2303 case MVT::ppcf128
: LC
= Call_PPCF128
; break;
2305 return ExpandLibCall(LC
, Node
, false);
2308 SDValue
SelectionDAGLegalize::ExpandIntLibCall(SDNode
* Node
, bool isSigned
,
2309 RTLIB::Libcall Call_I8
,
2310 RTLIB::Libcall Call_I16
,
2311 RTLIB::Libcall Call_I32
,
2312 RTLIB::Libcall Call_I64
,
2313 RTLIB::Libcall Call_I128
) {
2315 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2316 default: assert(0 && "Unexpected request for libcall!");
2317 case MVT::i8
: LC
= Call_I8
; break;
2318 case MVT::i16
: LC
= Call_I16
; break;
2319 case MVT::i32
: LC
= Call_I32
; break;
2320 case MVT::i64
: LC
= Call_I64
; break;
2321 case MVT::i128
: LC
= Call_I128
; break;
2323 return ExpandLibCall(LC
, Node
, isSigned
);
2326 /// isDivRemLibcallAvailable - Return true if divmod libcall is available.
2327 static bool isDivRemLibcallAvailable(SDNode
*Node
, bool isSigned
,
2328 const TargetLowering
&TLI
) {
2330 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2331 default: assert(0 && "Unexpected request for libcall!");
2332 case MVT::i8
: LC
= isSigned
? RTLIB::SDIVREM_I8
: RTLIB::UDIVREM_I8
; break;
2333 case MVT::i16
: LC
= isSigned
? RTLIB::SDIVREM_I16
: RTLIB::UDIVREM_I16
; break;
2334 case MVT::i32
: LC
= isSigned
? RTLIB::SDIVREM_I32
: RTLIB::UDIVREM_I32
; break;
2335 case MVT::i64
: LC
= isSigned
? RTLIB::SDIVREM_I64
: RTLIB::UDIVREM_I64
; break;
2336 case MVT::i128
: LC
= isSigned
? RTLIB::SDIVREM_I128
:RTLIB::UDIVREM_I128
; break;
2339 return TLI
.getLibcallName(LC
) != 0;
2342 /// UseDivRem - Only issue divrem libcall if both quotient and remainder are
2344 static bool UseDivRem(SDNode
*Node
, bool isSigned
, bool isDIV
) {
2345 unsigned OtherOpcode
= 0;
2347 OtherOpcode
= isDIV
? ISD::SREM
: ISD::SDIV
;
2349 OtherOpcode
= isDIV
? ISD::UREM
: ISD::UDIV
;
2351 SDValue Op0
= Node
->getOperand(0);
2352 SDValue Op1
= Node
->getOperand(1);
2353 for (SDNode::use_iterator UI
= Op0
.getNode()->use_begin(),
2354 UE
= Op0
.getNode()->use_end(); UI
!= UE
; ++UI
) {
2358 if (User
->getOpcode() == OtherOpcode
&&
2359 User
->getOperand(0) == Op0
&&
2360 User
->getOperand(1) == Op1
)
2366 /// ExpandDivRemLibCall - Issue libcalls to __{u}divmod to compute div / rem
2369 SelectionDAGLegalize::ExpandDivRemLibCall(SDNode
*Node
,
2370 SmallVectorImpl
<SDValue
> &Results
) {
2371 unsigned Opcode
= Node
->getOpcode();
2372 bool isSigned
= Opcode
== ISD::SDIVREM
;
2375 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2376 default: assert(0 && "Unexpected request for libcall!");
2377 case MVT::i8
: LC
= isSigned
? RTLIB::SDIVREM_I8
: RTLIB::UDIVREM_I8
; break;
2378 case MVT::i16
: LC
= isSigned
? RTLIB::SDIVREM_I16
: RTLIB::UDIVREM_I16
; break;
2379 case MVT::i32
: LC
= isSigned
? RTLIB::SDIVREM_I32
: RTLIB::UDIVREM_I32
; break;
2380 case MVT::i64
: LC
= isSigned
? RTLIB::SDIVREM_I64
: RTLIB::UDIVREM_I64
; break;
2381 case MVT::i128
: LC
= isSigned
? RTLIB::SDIVREM_I128
:RTLIB::UDIVREM_I128
; break;
2384 // The input chain to this libcall is the entry node of the function.
2385 // Legalizing the call will automatically add the previous call to the
2387 SDValue InChain
= DAG
.getEntryNode();
2389 EVT RetVT
= Node
->getValueType(0);
2390 const Type
*RetTy
= RetVT
.getTypeForEVT(*DAG
.getContext());
2392 TargetLowering::ArgListTy Args
;
2393 TargetLowering::ArgListEntry Entry
;
2394 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
2395 EVT ArgVT
= Node
->getOperand(i
).getValueType();
2396 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
2397 Entry
.Node
= Node
->getOperand(i
); Entry
.Ty
= ArgTy
;
2398 Entry
.isSExt
= isSigned
;
2399 Entry
.isZExt
= !isSigned
;
2400 Args
.push_back(Entry
);
2403 // Also pass the return address of the remainder.
2404 SDValue FIPtr
= DAG
.CreateStackTemporary(RetVT
);
2406 Entry
.Ty
= RetTy
->getPointerTo();
2407 Entry
.isSExt
= isSigned
;
2408 Entry
.isZExt
= !isSigned
;
2409 Args
.push_back(Entry
);
2411 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
2412 TLI
.getPointerTy());
2414 // Splice the libcall in wherever FindInputOutputChains tells us to.
2415 DebugLoc dl
= Node
->getDebugLoc();
2416 std::pair
<SDValue
, SDValue
> CallInfo
=
2417 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
2418 0, TLI
.getLibcallCallingConv(LC
), /*isTailCall=*/false,
2419 /*isReturnValueUsed=*/true, Callee
, Args
, DAG
, dl
);
2421 // Legalize the call sequence, starting with the chain. This will advance
2422 // the LastCALLSEQ to the legalized version of the CALLSEQ_END node that
2423 // was added by LowerCallTo (guaranteeing proper serialization of calls).
2424 LegalizeOp(CallInfo
.second
);
2426 // Remainder is loaded back from the stack frame.
2427 SDValue Rem
= DAG
.getLoad(RetVT
, dl
, getLastCALLSEQ(), FIPtr
,
2428 MachinePointerInfo(), false, false, 0);
2429 Results
.push_back(CallInfo
.first
);
2430 Results
.push_back(Rem
);
2433 /// ExpandLegalINT_TO_FP - This function is responsible for legalizing a
2434 /// INT_TO_FP operation of the specified operand when the target requests that
2435 /// we expand it. At this point, we know that the result and operand types are
2436 /// legal for the target.
2437 SDValue
SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned
,
2441 if (Op0
.getValueType() == MVT::i32
) {
2442 // simple 32-bit [signed|unsigned] integer to float/double expansion
2444 // Get the stack frame index of a 8 byte buffer.
2445 SDValue StackSlot
= DAG
.CreateStackTemporary(MVT::f64
);
2447 // word offset constant for Hi/Lo address computation
2448 SDValue WordOff
= DAG
.getConstant(sizeof(int), TLI
.getPointerTy());
2449 // set up Hi and Lo (into buffer) address based on endian
2450 SDValue Hi
= StackSlot
;
2451 SDValue Lo
= DAG
.getNode(ISD::ADD
, dl
,
2452 TLI
.getPointerTy(), StackSlot
, WordOff
);
2453 if (TLI
.isLittleEndian())
2456 // if signed map to unsigned space
2459 // constant used to invert sign bit (signed to unsigned mapping)
2460 SDValue SignBit
= DAG
.getConstant(0x80000000u
, MVT::i32
);
2461 Op0Mapped
= DAG
.getNode(ISD::XOR
, dl
, MVT::i32
, Op0
, SignBit
);
2465 // store the lo of the constructed double - based on integer input
2466 SDValue Store1
= DAG
.getStore(DAG
.getEntryNode(), dl
,
2467 Op0Mapped
, Lo
, MachinePointerInfo(),
2469 // initial hi portion of constructed double
2470 SDValue InitialHi
= DAG
.getConstant(0x43300000u
, MVT::i32
);
2471 // store the hi of the constructed double - biased exponent
2472 SDValue Store2
= DAG
.getStore(Store1
, dl
, InitialHi
, Hi
,
2473 MachinePointerInfo(),
2475 // load the constructed double
2476 SDValue Load
= DAG
.getLoad(MVT::f64
, dl
, Store2
, StackSlot
,
2477 MachinePointerInfo(), false, false, 0);
2478 // FP constant to bias correct the final result
2479 SDValue Bias
= DAG
.getConstantFP(isSigned
?
2480 BitsToDouble(0x4330000080000000ULL
) :
2481 BitsToDouble(0x4330000000000000ULL
),
2483 // subtract the bias
2484 SDValue Sub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, Load
, Bias
);
2487 // handle final rounding
2488 if (DestVT
== MVT::f64
) {
2491 } else if (DestVT
.bitsLT(MVT::f64
)) {
2492 Result
= DAG
.getNode(ISD::FP_ROUND
, dl
, DestVT
, Sub
,
2493 DAG
.getIntPtrConstant(0));
2494 } else if (DestVT
.bitsGT(MVT::f64
)) {
2495 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
, DestVT
, Sub
);
2499 assert(!isSigned
&& "Legalize cannot Expand SINT_TO_FP for i64 yet");
2500 // Code below here assumes !isSigned without checking again.
2502 // Implementation of unsigned i64 to f64 following the algorithm in
2503 // __floatundidf in compiler_rt. This implementation has the advantage
2504 // of performing rounding correctly, both in the default rounding mode
2505 // and in all alternate rounding modes.
2506 // TODO: Generalize this for use with other types.
2507 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f64
) {
2509 DAG
.getConstant(UINT64_C(0x4330000000000000), MVT::i64
);
2510 SDValue TwoP84PlusTwoP52
=
2511 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64
);
2513 DAG
.getConstant(UINT64_C(0x4530000000000000), MVT::i64
);
2515 SDValue Lo
= DAG
.getZeroExtendInReg(Op0
, dl
, MVT::i32
);
2516 SDValue Hi
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Op0
,
2517 DAG
.getConstant(32, MVT::i64
));
2518 SDValue LoOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Lo
, TwoP52
);
2519 SDValue HiOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Hi
, TwoP84
);
2520 SDValue LoFlt
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::f64
, LoOr
);
2521 SDValue HiFlt
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::f64
, HiOr
);
2522 SDValue HiSub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, HiFlt
,
2524 return DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, LoFlt
, HiSub
);
2527 // Implementation of unsigned i64 to f32.
2528 // TODO: Generalize this for use with other types.
2529 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f32
) {
2530 // For unsigned conversions, convert them to signed conversions using the
2531 // algorithm from the x86_64 __floatundidf in compiler_rt.
2533 SDValue Fast
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::f32
, Op0
);
2535 SDValue ShiftConst
=
2536 DAG
.getConstant(1, TLI
.getShiftAmountTy(Op0
.getValueType()));
2537 SDValue Shr
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Op0
, ShiftConst
);
2538 SDValue AndConst
= DAG
.getConstant(1, MVT::i64
);
2539 SDValue And
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
, AndConst
);
2540 SDValue Or
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, And
, Shr
);
2542 SDValue SignCvt
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::f32
, Or
);
2543 SDValue Slow
= DAG
.getNode(ISD::FADD
, dl
, MVT::f32
, SignCvt
, SignCvt
);
2545 // TODO: This really should be implemented using a branch rather than a
2546 // select. We happen to get lucky and machinesink does the right
2547 // thing most of the time. This would be a good candidate for a
2548 //pseudo-op, or, even better, for whole-function isel.
2549 SDValue SignBitTest
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2550 Op0
, DAG
.getConstant(0, MVT::i64
), ISD::SETLT
);
2551 return DAG
.getNode(ISD::SELECT
, dl
, MVT::f32
, SignBitTest
, Slow
, Fast
);
2554 // Otherwise, implement the fully general conversion.
2556 SDValue And
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2557 DAG
.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64
));
2558 SDValue Or
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, And
,
2559 DAG
.getConstant(UINT64_C(0x800), MVT::i64
));
2560 SDValue And2
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2561 DAG
.getConstant(UINT64_C(0x7ff), MVT::i64
));
2562 SDValue Ne
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2563 And2
, DAG
.getConstant(UINT64_C(0), MVT::i64
), ISD::SETNE
);
2564 SDValue Sel
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ne
, Or
, Op0
);
2565 SDValue Ge
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2566 Op0
, DAG
.getConstant(UINT64_C(0x0020000000000000), MVT::i64
),
2568 SDValue Sel2
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ge
, Sel
, Op0
);
2569 EVT SHVT
= TLI
.getShiftAmountTy(Sel2
.getValueType());
2571 SDValue Sh
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Sel2
,
2572 DAG
.getConstant(32, SHVT
));
2573 SDValue Trunc
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sh
);
2574 SDValue Fcvt
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Trunc
);
2576 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64
);
2577 SDValue Fmul
= DAG
.getNode(ISD::FMUL
, dl
, MVT::f64
, TwoP32
, Fcvt
);
2578 SDValue Lo
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sel2
);
2579 SDValue Fcvt2
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Lo
);
2580 SDValue Fadd
= DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, Fmul
, Fcvt2
);
2581 return DAG
.getNode(ISD::FP_ROUND
, dl
, MVT::f32
, Fadd
,
2582 DAG
.getIntPtrConstant(0));
2585 SDValue Tmp1
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, DestVT
, Op0
);
2587 SDValue SignSet
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(Op0
.getValueType()),
2588 Op0
, DAG
.getConstant(0, Op0
.getValueType()),
2590 SDValue Zero
= DAG
.getIntPtrConstant(0), Four
= DAG
.getIntPtrConstant(4);
2591 SDValue CstOffset
= DAG
.getNode(ISD::SELECT
, dl
, Zero
.getValueType(),
2592 SignSet
, Four
, Zero
);
2594 // If the sign bit of the integer is set, the large number will be treated
2595 // as a negative number. To counteract this, the dynamic code adds an
2596 // offset depending on the data type.
2598 switch (Op0
.getValueType().getSimpleVT().SimpleTy
) {
2599 default: assert(0 && "Unsupported integer type!");
2600 case MVT::i8
: FF
= 0x43800000ULL
; break; // 2^8 (as a float)
2601 case MVT::i16
: FF
= 0x47800000ULL
; break; // 2^16 (as a float)
2602 case MVT::i32
: FF
= 0x4F800000ULL
; break; // 2^32 (as a float)
2603 case MVT::i64
: FF
= 0x5F800000ULL
; break; // 2^64 (as a float)
2605 if (TLI
.isLittleEndian()) FF
<<= 32;
2606 Constant
*FudgeFactor
= ConstantInt::get(
2607 Type::getInt64Ty(*DAG
.getContext()), FF
);
2609 SDValue CPIdx
= DAG
.getConstantPool(FudgeFactor
, TLI
.getPointerTy());
2610 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
2611 CPIdx
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), CPIdx
, CstOffset
);
2612 Alignment
= std::min(Alignment
, 4u);
2614 if (DestVT
== MVT::f32
)
2615 FudgeInReg
= DAG
.getLoad(MVT::f32
, dl
, DAG
.getEntryNode(), CPIdx
,
2616 MachinePointerInfo::getConstantPool(),
2617 false, false, Alignment
);
2620 LegalizeOp(DAG
.getExtLoad(ISD::EXTLOAD
, dl
, DestVT
,
2621 DAG
.getEntryNode(), CPIdx
,
2622 MachinePointerInfo::getConstantPool(),
2623 MVT::f32
, false, false, Alignment
));
2626 return DAG
.getNode(ISD::FADD
, dl
, DestVT
, Tmp1
, FudgeInReg
);
2629 /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
2630 /// *INT_TO_FP operation of the specified operand when the target requests that
2631 /// we promote it. At this point, we know that the result and operand types are
2632 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2633 /// operation that takes a larger input.
2634 SDValue
SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp
,
2638 // First step, figure out the appropriate *INT_TO_FP operation to use.
2639 EVT NewInTy
= LegalOp
.getValueType();
2641 unsigned OpToUse
= 0;
2643 // Scan for the appropriate larger type to use.
2645 NewInTy
= (MVT::SimpleValueType
)(NewInTy
.getSimpleVT().SimpleTy
+1);
2646 assert(NewInTy
.isInteger() && "Ran out of possibilities!");
2648 // If the target supports SINT_TO_FP of this type, use it.
2649 if (TLI
.isOperationLegalOrCustom(ISD::SINT_TO_FP
, NewInTy
)) {
2650 OpToUse
= ISD::SINT_TO_FP
;
2653 if (isSigned
) continue;
2655 // If the target supports UINT_TO_FP of this type, use it.
2656 if (TLI
.isOperationLegalOrCustom(ISD::UINT_TO_FP
, NewInTy
)) {
2657 OpToUse
= ISD::UINT_TO_FP
;
2661 // Otherwise, try a larger type.
2664 // Okay, we found the operation and type to use. Zero extend our input to the
2665 // desired type then run the operation on it.
2666 return DAG
.getNode(OpToUse
, dl
, DestVT
,
2667 DAG
.getNode(isSigned
? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
,
2668 dl
, NewInTy
, LegalOp
));
2671 /// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
2672 /// FP_TO_*INT operation of the specified operand when the target requests that
2673 /// we promote it. At this point, we know that the result and operand types are
2674 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2675 /// operation that returns a larger result.
2676 SDValue
SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp
,
2680 // First step, figure out the appropriate FP_TO*INT operation to use.
2681 EVT NewOutTy
= DestVT
;
2683 unsigned OpToUse
= 0;
2685 // Scan for the appropriate larger type to use.
2687 NewOutTy
= (MVT::SimpleValueType
)(NewOutTy
.getSimpleVT().SimpleTy
+1);
2688 assert(NewOutTy
.isInteger() && "Ran out of possibilities!");
2690 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_SINT
, NewOutTy
)) {
2691 OpToUse
= ISD::FP_TO_SINT
;
2695 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_UINT
, NewOutTy
)) {
2696 OpToUse
= ISD::FP_TO_UINT
;
2700 // Otherwise, try a larger type.
2704 // Okay, we found the operation and type to use.
2705 SDValue Operation
= DAG
.getNode(OpToUse
, dl
, NewOutTy
, LegalOp
);
2707 // Truncate the result of the extended FP_TO_*INT operation to the desired
2709 return DAG
.getNode(ISD::TRUNCATE
, dl
, DestVT
, Operation
);
2712 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
2714 SDValue
SelectionDAGLegalize::ExpandBSWAP(SDValue Op
, DebugLoc dl
) {
2715 EVT VT
= Op
.getValueType();
2716 EVT SHVT
= TLI
.getShiftAmountTy(VT
);
2717 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
, Tmp5
, Tmp6
, Tmp7
, Tmp8
;
2718 switch (VT
.getSimpleVT().SimpleTy
) {
2719 default: assert(0 && "Unhandled Expand type in BSWAP!");
2721 Tmp2
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2722 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2723 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp1
, Tmp2
);
2725 Tmp4
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2726 Tmp3
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2727 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2728 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2729 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(0xFF0000, VT
));
2730 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(0xFF00, VT
));
2731 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2732 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2733 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2735 Tmp8
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2736 Tmp7
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2737 Tmp6
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2738 Tmp5
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2739 Tmp4
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2740 Tmp3
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2741 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2742 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2743 Tmp7
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp7
, DAG
.getConstant(255ULL<<48, VT
));
2744 Tmp6
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp6
, DAG
.getConstant(255ULL<<40, VT
));
2745 Tmp5
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp5
, DAG
.getConstant(255ULL<<32, VT
));
2746 Tmp4
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp4
, DAG
.getConstant(255ULL<<24, VT
));
2747 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(255ULL<<16, VT
));
2748 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(255ULL<<8 , VT
));
2749 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp7
);
2750 Tmp6
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp6
, Tmp5
);
2751 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2752 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2753 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp6
);
2754 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2755 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp4
);
2759 /// SplatByte - Distribute ByteVal over NumBits bits.
2760 // FIXME: Move this helper to a common place.
2761 static APInt
SplatByte(unsigned NumBits
, uint8_t ByteVal
) {
2762 APInt Val
= APInt(NumBits
, ByteVal
);
2764 for (unsigned i
= NumBits
; i
> 8; i
>>= 1) {
2765 Val
= (Val
<< Shift
) | Val
;
2771 /// ExpandBitCount - Expand the specified bitcount instruction into operations.
2773 SDValue
SelectionDAGLegalize::ExpandBitCount(unsigned Opc
, SDValue Op
,
2776 default: assert(0 && "Cannot expand this yet!");
2778 EVT VT
= Op
.getValueType();
2779 EVT ShVT
= TLI
.getShiftAmountTy(VT
);
2780 unsigned Len
= VT
.getSizeInBits();
2782 assert(VT
.isInteger() && Len
<= 128 && Len
% 8 == 0 &&
2783 "CTPOP not implemented for this type.");
2785 // This is the "best" algorithm from
2786 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
2788 SDValue Mask55
= DAG
.getConstant(SplatByte(Len
, 0x55), VT
);
2789 SDValue Mask33
= DAG
.getConstant(SplatByte(Len
, 0x33), VT
);
2790 SDValue Mask0F
= DAG
.getConstant(SplatByte(Len
, 0x0F), VT
);
2791 SDValue Mask01
= DAG
.getConstant(SplatByte(Len
, 0x01), VT
);
2793 // v = v - ((v >> 1) & 0x55555555...)
2794 Op
= DAG
.getNode(ISD::SUB
, dl
, VT
, Op
,
2795 DAG
.getNode(ISD::AND
, dl
, VT
,
2796 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
,
2797 DAG
.getConstant(1, ShVT
)),
2799 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
2800 Op
= DAG
.getNode(ISD::ADD
, dl
, VT
,
2801 DAG
.getNode(ISD::AND
, dl
, VT
, Op
, Mask33
),
2802 DAG
.getNode(ISD::AND
, dl
, VT
,
2803 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
,
2804 DAG
.getConstant(2, ShVT
)),
2806 // v = (v + (v >> 4)) & 0x0F0F0F0F...
2807 Op
= DAG
.getNode(ISD::AND
, dl
, VT
,
2808 DAG
.getNode(ISD::ADD
, dl
, VT
, Op
,
2809 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
,
2810 DAG
.getConstant(4, ShVT
))),
2812 // v = (v * 0x01010101...) >> (Len - 8)
2813 Op
= DAG
.getNode(ISD::SRL
, dl
, VT
,
2814 DAG
.getNode(ISD::MUL
, dl
, VT
, Op
, Mask01
),
2815 DAG
.getConstant(Len
- 8, ShVT
));
2820 // for now, we do this:
2821 // x = x | (x >> 1);
2822 // x = x | (x >> 2);
2824 // x = x | (x >>16);
2825 // x = x | (x >>32); // for 64-bit input
2826 // return popcount(~x);
2828 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
2829 EVT VT
= Op
.getValueType();
2830 EVT ShVT
= TLI
.getShiftAmountTy(VT
);
2831 unsigned len
= VT
.getSizeInBits();
2832 for (unsigned i
= 0; (1U << i
) <= (len
/ 2); ++i
) {
2833 SDValue Tmp3
= DAG
.getConstant(1ULL << i
, ShVT
);
2834 Op
= DAG
.getNode(ISD::OR
, dl
, VT
, Op
,
2835 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, Tmp3
));
2837 Op
= DAG
.getNOT(dl
, Op
, VT
);
2838 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Op
);
2841 // for now, we use: { return popcount(~x & (x - 1)); }
2842 // unless the target has ctlz but not ctpop, in which case we use:
2843 // { return 32 - nlz(~x & (x-1)); }
2844 // see also http://www.hackersdelight.org/HDcode/ntz.cc
2845 EVT VT
= Op
.getValueType();
2846 SDValue Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
,
2847 DAG
.getNOT(dl
, Op
, VT
),
2848 DAG
.getNode(ISD::SUB
, dl
, VT
, Op
,
2849 DAG
.getConstant(1, VT
)));
2850 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
2851 if (!TLI
.isOperationLegalOrCustom(ISD::CTPOP
, VT
) &&
2852 TLI
.isOperationLegalOrCustom(ISD::CTLZ
, VT
))
2853 return DAG
.getNode(ISD::SUB
, dl
, VT
,
2854 DAG
.getConstant(VT
.getSizeInBits(), VT
),
2855 DAG
.getNode(ISD::CTLZ
, dl
, VT
, Tmp3
));
2856 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Tmp3
);
std::pair <SDValue, SDValue> SelectionDAGLegalize::ExpandAtomic(SDNode *Node) {
  unsigned Opc = Node->getOpcode();
  MVT VT = cast<AtomicSDNode>(Node)->getMemoryVT().getSimpleVT();
  RTLIB::Libcall LC;

  switch (Opc) {
  default:
    llvm_unreachable("Unhandled atomic intrinsic Expand!");
    break;
  case ISD::ATOMIC_SWAP:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_LOCK_TEST_AND_SET_1; break;
    case MVT::i16: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_2; break;
    case MVT::i32: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_4; break;
    case MVT::i64: LC = RTLIB::SYNC_LOCK_TEST_AND_SET_8; break;
    }
    break;
  case ISD::ATOMIC_CMP_SWAP:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1; break;
    case MVT::i16: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2; break;
    case MVT::i32: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4; break;
    case MVT::i64: LC = RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_ADD:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_ADD_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_ADD_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_ADD_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_ADD_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_SUB:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_SUB_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_SUB_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_SUB_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_SUB_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_AND:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_AND_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_AND_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_AND_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_AND_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_OR:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_OR_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_OR_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_OR_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_OR_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_XOR:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_XOR_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_XOR_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_XOR_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_XOR_8; break;
    }
    break;
  case ISD::ATOMIC_LOAD_NAND:
    switch (VT.SimpleTy) {
    default: llvm_unreachable("Unexpected value type for atomic!");
    case MVT::i8:  LC = RTLIB::SYNC_FETCH_AND_NAND_1; break;
    case MVT::i16: LC = RTLIB::SYNC_FETCH_AND_NAND_2; break;
    case MVT::i32: LC = RTLIB::SYNC_FETCH_AND_NAND_4; break;
    case MVT::i64: LC = RTLIB::SYNC_FETCH_AND_NAND_8; break;
    }
    break;
  }
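  // Illustrative note: an i32 ISD::ATOMIC_LOAD_ADD, for example, selects
  // RTLIB::SYNC_FETCH_AND_ADD_4 above; ExpandChainLibCall below typically
  // turns that into a call to the target's __sync_fetch_and_add_4 runtime
  // routine, returning both the loaded value and the output chain.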

  return ExpandChainLibCall(LC, Node, false);
}
void SelectionDAGLegalize::ExpandNode(SDNode *Node,
                                      SmallVectorImpl<SDValue> &Results) {
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3, Tmp4;
  switch (Node->getOpcode()) {
  case ISD::CTPOP:
  case ISD::CTLZ:
  case ISD::CTTZ:
    Tmp1 = ExpandBitCount(Node->getOpcode(), Node->getOperand(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::BSWAP:
    Results.push_back(ExpandBSWAP(Node->getOperand(0), dl));
    break;
  case ISD::FRAMEADDR:
  case ISD::RETURNADDR:
  case ISD::FRAME_TO_ARGS_OFFSET:
    Results.push_back(DAG.getConstant(0, Node->getValueType(0)));
    break;
  case ISD::FLT_ROUNDS_:
    Results.push_back(DAG.getConstant(1, Node->getValueType(0)));
    break;
  case ISD::EH_RETURN:
  case ISD::EH_SJLJ_LONGJMP:
  case ISD::EH_SJLJ_DISPATCHSETUP:
    // If the target didn't expand these, there's nothing to do, so just
    // preserve the chain and be done.
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::EH_SJLJ_SETJMP:
    // If the target didn't expand this, just return 'zero' and preserve the
    // chain.
    Results.push_back(DAG.getConstant(0, MVT::i32));
    Results.push_back(Node->getOperand(0));
    break;
  case ISD::MEMBARRIER: {
    // If the target didn't lower this, lower it to '__sync_synchronize()' call
    TargetLowering::ArgListTy Args;
    std::pair<SDValue, SDValue> CallResult =
      TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
                      false, false, false, false, 0, CallingConv::C,
                      /*isTailCall=*/false,
                      /*isReturnValueUsed=*/true,
                      DAG.getExternalSymbol("__sync_synchronize",
                                            TLI.getPointerTy()),
                      Args, DAG, dl);
    Results.push_back(CallResult.second);
    break;
  }
  // By default, atomic intrinsics are marked Legal and lowered. Targets
  // which don't support them directly, however, may want libcalls, in which
  // case they mark them Expand, and we get here.
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_CMP_SWAP: {
    std::pair<SDValue, SDValue> Tmp = ExpandAtomic(Node);
    Results.push_back(Tmp.first);
    Results.push_back(Tmp.second);
    break;
  }
  case ISD::DYNAMIC_STACKALLOC:
    ExpandDYNAMIC_STACKALLOC(Node, Results);
    break;
  case ISD::MERGE_VALUES:
    for (unsigned i = 0; i < Node->getNumValues(); i++)
      Results.push_back(Node->getOperand(i));
    break;
  case ISD::UNDEF: {
    EVT VT = Node->getValueType(0);
    if (VT.isInteger())
      Results.push_back(DAG.getConstant(0, VT));
    else {
      assert(VT.isFloatingPoint() && "Unknown value type!");
      Results.push_back(DAG.getConstantFP(0, VT));
    }
    break;
  }
  case ISD::TRAP: {
    // If this operation is not supported, lower it to 'abort()' call
    TargetLowering::ArgListTy Args;
    std::pair<SDValue, SDValue> CallResult =
      TLI.LowerCallTo(Node->getOperand(0), Type::getVoidTy(*DAG.getContext()),
                      false, false, false, false, 0, CallingConv::C,
                      /*isTailCall=*/false,
                      /*isReturnValueUsed=*/true,
                      DAG.getExternalSymbol("abort", TLI.getPointerTy()),
                      Args, DAG, dl);
    Results.push_back(CallResult.second);
    break;
  }
  case ISD::FP_ROUND:
  case ISD::BITCAST:
    Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_EXTEND:
    Tmp1 = EmitStackConvert(Node->getOperand(0),
                            Node->getOperand(0).getValueType(),
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::SIGN_EXTEND_INREG: {
    // NOTE: we could fall back on load/store here too for targets without
    // SAR.  However, it is doubtful that any exist.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    EVT VT = Node->getValueType(0);
    EVT ShiftAmountTy = TLI.getShiftAmountTy(VT);
    unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
                        ExtraVT.getScalarType().getSizeInBits();
    SDValue ShiftCst = DAG.getConstant(BitsDiff, ShiftAmountTy);
    Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
                       Node->getOperand(0), ShiftCst);
    Tmp1 = DAG.getNode(ISD::SRA, dl, Node->getValueType(0), Tmp1, ShiftCst);
    Results.push_back(Tmp1);
    break;
  }
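  // Illustrative note: the SHL/SRA pair above performs the in-register sign
  // extension arithmetically. For ExtraVT = i8 inside an i32 register,
  // BitsDiff = 24, so 0x000000FF << 24 = 0xFF000000, and an arithmetic shift
  // right by 24 gives 0xFFFFFFFF, i.e. -1 as expected.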
  case ISD::FP_ROUND_INREG: {
    // The only way we can lower this is to turn it into a TRUNCSTORE,
    // EXTLOAD pair, targeting a temporary location (a stack slot).

    // NOTE: there is a choice here between constantly creating new stack
    // slots and always reusing the same one.  We currently always create
    // new ones, as reuse may inhibit scheduling.
    EVT ExtraVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
    Tmp1 = EmitStackConvert(Node->getOperand(0), ExtraVT,
                            Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    Tmp1 = ExpandLegalINT_TO_FP(Node->getOpcode() == ISD::SINT_TO_FP,
                                Node->getOperand(0), Node->getValueType(0), dl);
    Results.push_back(Tmp1);
    break;
  case ISD::FP_TO_UINT: {
    SDValue True, False;
    EVT VT = Node->getOperand(0).getValueType();
    EVT NVT = Node->getValueType(0);
    APFloat apf(APInt::getNullValue(VT.getSizeInBits()));
    APInt x = APInt::getSignBit(NVT.getSizeInBits());
    (void)apf.convertFromAPInt(x, false, APFloat::rmNearestTiesToEven);
    Tmp1 = DAG.getConstantFP(apf, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(VT),
                        Node->getOperand(0),
                        Tmp1, ISD::SETLT);
    True = DAG.getNode(ISD::FP_TO_SINT, dl, NVT, Node->getOperand(0));
    False = DAG.getNode(ISD::FP_TO_SINT, dl, NVT,
                        DAG.getNode(ISD::FSUB, dl, VT,
                                    Node->getOperand(0), Tmp1));
    False = DAG.getNode(ISD::XOR, dl, NVT, False,
                        DAG.getConstant(x, NVT));
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2, True, False);
    Results.push_back(Tmp1);
    break;
  }
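  // Illustrative note: the FP_TO_UINT expansion above relies on the identity
  // (unsigned)x == (signed)x when x < 2^(N-1), and otherwise
  // (unsigned)x == (signed)(x - 2^(N-1)) ^ (1 << (N-1)), where N is the width
  // of the integer result; the SETLT selects between the two forms.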
  case ISD::VAARG: {
    const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    unsigned Align = Node->getConstantOperandVal(3);

    SDValue VAListLoad = DAG.getLoad(TLI.getPointerTy(), dl, Tmp1, Tmp2,
                                     MachinePointerInfo(V), false, false, 0);
    SDValue VAList = VAListLoad;

    if (Align > TLI.getMinStackArgumentAlignment()) {
      assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");

      VAList = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(Align - 1,
                                           TLI.getPointerTy()));

      VAList = DAG.getNode(ISD::AND, dl, TLI.getPointerTy(), VAList,
                           DAG.getConstant(-(int64_t)Align,
                                           TLI.getPointerTy()));
    }
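    // Illustrative note: the ADD/AND pair above rounds the va_list pointer up
    // to the requested alignment: p' = (p + Align - 1) & ~(Align - 1). For
    // example, p = 0x1006 with Align = 8 becomes (0x100D & ~7) = 0x1008.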
    // Increment the pointer, VAList, to the next vaarg
    Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                       DAG.getConstant(TLI.getTargetData()->
                          getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
                                       TLI.getPointerTy()));
    // Store the incremented VAList to the legalized pointer
    Tmp3 = DAG.getStore(VAListLoad.getValue(1), dl, Tmp3, Tmp2,
                        MachinePointerInfo(V), false, false, 0);
    // Load the actual argument out of the pointer VAList
    Results.push_back(DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                                  false, false, 0));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::VACOPY: {
    // This defaults to loading a pointer from the input and storing it to the
    // output, returning the chain.
    const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
    const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
    Tmp1 = DAG.getLoad(TLI.getPointerTy(), dl, Node->getOperand(0),
                       Node->getOperand(2), MachinePointerInfo(VS),
                       false, false, 0);
    Tmp1 = DAG.getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
                        MachinePointerInfo(VD), false, false, 0);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
      // This must be an access of the only element.  Return it.
      Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
                         Node->getOperand(0));
    else
      Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
    Results.push_back(Tmp1);
    break;
  case ISD::EXTRACT_SUBVECTOR:
    Results.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::INSERT_SUBVECTOR:
    Results.push_back(ExpandInsertToVectorThroughStack(SDValue(Node, 0)));
    break;
  case ISD::CONCAT_VECTORS: {
    Results.push_back(ExpandVectorBuildThroughStack(Node));
    break;
  }
  case ISD::SCALAR_TO_VECTOR:
    Results.push_back(ExpandSCALAR_TO_VECTOR(Node));
    break;
  case ISD::INSERT_VECTOR_ELT:
    Results.push_back(ExpandINSERT_VECTOR_ELT(Node->getOperand(0),
                                              Node->getOperand(1),
                                              Node->getOperand(2), dl));
    break;
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    EVT VT = Node->getValueType(0);
    EVT EltVT = VT.getVectorElementType();
    if (getTypeAction(EltVT) == Promote)
      EltVT = TLI.getTypeToTransformTo(*DAG.getContext(), EltVT);
    unsigned NumElems = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i != NumElems; ++i) {
      if (Mask[i] < 0) {
        Ops.push_back(DAG.getUNDEF(EltVT));
        continue;
      }
      unsigned Idx = Mask[i];
      if (Idx < NumElems)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(0),
                                  DAG.getIntPtrConstant(Idx)));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  Node->getOperand(1),
                                  DAG.getIntPtrConstant(Idx - NumElems)));
    }
    Tmp1 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], Ops.size());
    Results.push_back(Tmp1);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    EVT OpTy = Node->getOperand(0).getValueType();
    if (cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue()) {
      Tmp1 = DAG.getNode(ISD::SRL, dl, OpTy, Node->getOperand(0),
                         DAG.getConstant(OpTy.getSizeInBits()/2,
                    TLI.getShiftAmountTy(Node->getOperand(0).getValueType())));
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0), Tmp1);
    } else {
      Tmp1 = DAG.getNode(ISD::TRUNCATE, dl, Node->getValueType(0),
                         Node->getOperand(0));
    }
    Results.push_back(Tmp1);
    break;
  }
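  // Illustrative note: for an i64 operand 0x1122334455667788 split into i32
  // halves, element 1 (the high half) is obtained above as
  // truncate(srl(x, 32)) = 0x11223344, while element 0 is simply
  // truncate(x) = 0x55667788.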
  case ISD::STACKSAVE:
    // Expand to CopyFromReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, SP,
                                           Node->getValueType(0)));
      Results.push_back(Results[0].getValue(1));
    } else {
      Results.push_back(DAG.getUNDEF(Node->getValueType(0)));
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::STACKRESTORE:
    // Expand to CopyToReg if the target set
    // StackPointerRegisterToSaveRestore.
    if (unsigned SP = TLI.getStackPointerRegisterToSaveRestore()) {
      Results.push_back(DAG.getCopyToReg(Node->getOperand(0), dl, SP,
                                         Node->getOperand(1)));
    } else {
      Results.push_back(Node->getOperand(0));
    }
    break;
  case ISD::FCOPYSIGN:
    Results.push_back(ExpandFCOPYSIGN(Node));
    break;
  case ISD::FNEG:
    // Expand Y = FNEG(X) ->  Y = SUB -0.0, X
    Tmp1 = DAG.getConstantFP(-0.0, Node->getValueType(0));
    Tmp1 = DAG.getNode(ISD::FSUB, dl, Node->getValueType(0), Tmp1,
                       Node->getOperand(0));
    Results.push_back(Tmp1);
    break;
  case ISD::FABS: {
    // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
    EVT VT = Node->getValueType(0);
    Tmp1 = Node->getOperand(0);
    Tmp2 = DAG.getConstantFP(0.0, VT);
    Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(Tmp1.getValueType()),
                        Tmp1, Tmp2, ISD::SETUGT);
    Tmp3 = DAG.getNode(ISD::FNEG, dl, VT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SELECT, dl, VT, Tmp2, Tmp1, Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::FSQRT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SQRT_F32, RTLIB::SQRT_F64,
                                      RTLIB::SQRT_F80, RTLIB::SQRT_PPCF128));
    break;
  case ISD::FSIN:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::SIN_F32, RTLIB::SIN_F64,
                                      RTLIB::SIN_F80, RTLIB::SIN_PPCF128));
    break;
  case ISD::FCOS:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::COS_F32, RTLIB::COS_F64,
                                      RTLIB::COS_F80, RTLIB::COS_PPCF128));
    break;
  case ISD::FLOG:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG_F32, RTLIB::LOG_F64,
                                      RTLIB::LOG_F80, RTLIB::LOG_PPCF128));
    break;
  case ISD::FLOG2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG2_F32, RTLIB::LOG2_F64,
                                      RTLIB::LOG2_F80, RTLIB::LOG2_PPCF128));
    break;
  case ISD::FLOG10:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::LOG10_F32, RTLIB::LOG10_F64,
                                      RTLIB::LOG10_F80, RTLIB::LOG10_PPCF128));
    break;
  case ISD::FEXP:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP_F32, RTLIB::EXP_F64,
                                      RTLIB::EXP_F80, RTLIB::EXP_PPCF128));
    break;
  case ISD::FEXP2:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::EXP2_F32, RTLIB::EXP2_F64,
                                      RTLIB::EXP2_F80, RTLIB::EXP2_PPCF128));
    break;
  case ISD::FTRUNC:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::TRUNC_F32, RTLIB::TRUNC_F64,
                                      RTLIB::TRUNC_F80, RTLIB::TRUNC_PPCF128));
    break;
  case ISD::FFLOOR:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FLOOR_F32, RTLIB::FLOOR_F64,
                                      RTLIB::FLOOR_F80, RTLIB::FLOOR_PPCF128));
    break;
  case ISD::FCEIL:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::CEIL_F32, RTLIB::CEIL_F64,
                                      RTLIB::CEIL_F80, RTLIB::CEIL_PPCF128));
    break;
  case ISD::FRINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::RINT_F32, RTLIB::RINT_F64,
                                      RTLIB::RINT_F80, RTLIB::RINT_PPCF128));
    break;
  case ISD::FNEARBYINT:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::NEARBYINT_F32,
                                      RTLIB::NEARBYINT_F64,
                                      RTLIB::NEARBYINT_F80,
                                      RTLIB::NEARBYINT_PPCF128));
    break;
  case ISD::FPOWI:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POWI_F32, RTLIB::POWI_F64,
                                      RTLIB::POWI_F80, RTLIB::POWI_PPCF128));
    break;
  case ISD::FPOW:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::POW_F32, RTLIB::POW_F64,
                                      RTLIB::POW_F80, RTLIB::POW_PPCF128));
    break;
  case ISD::FDIV:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::DIV_F32, RTLIB::DIV_F64,
                                      RTLIB::DIV_F80, RTLIB::DIV_PPCF128));
    break;
  case ISD::FREM:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::REM_F32, RTLIB::REM_F64,
                                      RTLIB::REM_F80, RTLIB::REM_PPCF128));
    break;
  case ISD::FMA:
    Results.push_back(ExpandFPLibCall(Node, RTLIB::FMA_F32, RTLIB::FMA_F64,
                                      RTLIB::FMA_F80, RTLIB::FMA_PPCF128));
    break;
  case ISD::FP16_TO_FP32:
    Results.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32, Node, false));
    break;
  case ISD::FP32_TO_FP16:
    Results.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16, Node, false));
    break;
  case ISD::ConstantFP: {
    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
    // Check to see if this FP immediate is already legal.
    // If this is a legal constant, turn it into a TargetConstantFP node.
    if (TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
      Results.push_back(SDValue(Node, 0));
    else
      Results.push_back(ExpandConstantFP(CFP, true, DAG, TLI));
    break;
  }
  case ISD::EHSELECTION: {
    unsigned Reg = TLI.getExceptionSelectorRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(1), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::EXCEPTIONADDR: {
    unsigned Reg = TLI.getExceptionAddressRegister();
    assert(Reg && "Can't expand to unknown register!");
    Results.push_back(DAG.getCopyFromReg(Node->getOperand(0), dl, Reg,
                                         Node->getValueType(0)));
    Results.push_back(Results[0].getValue(1));
    break;
  }
  case ISD::SUB: {
    EVT VT = Node->getValueType(0);
    assert(TLI.isOperationLegalOrCustom(ISD::ADD, VT) &&
           TLI.isOperationLegalOrCustom(ISD::XOR, VT) &&
           "Don't know how to expand this subtraction!");
    Tmp1 = DAG.getNode(ISD::XOR, dl, VT, Node->getOperand(1),
               DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT));
    Tmp1 = DAG.getNode(ISD::ADD, dl, VT, Tmp1, DAG.getConstant(1, VT));
    Results.push_back(DAG.getNode(ISD::ADD, dl, VT, Node->getOperand(0), Tmp1));
    break;
  }
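  // Illustrative note: the expansion above uses the two's-complement identity
  // a - b == a + (~b + 1). For i8 values a = 7, b = 3: ~3 = 0xFC,
  // ~3 + 1 = 0xFD (-3), and 7 + 0xFD wraps to 4 as expected.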
  case ISD::UREM:
  case ISD::SREM: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    bool isSigned = Node->getOpcode() == ISD::SREM;
    unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    Tmp2 = Node->getOperand(0);
    Tmp3 = Node->getOperand(1);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, false))) {
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1);
    } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) {
      // X % Y -> X - (X / Y) * Y
      Tmp1 = DAG.getNode(DivOpc, dl, VT, Tmp2, Tmp3);
      Tmp1 = DAG.getNode(ISD::MUL, dl, VT, Tmp1, Tmp3);
      Tmp1 = DAG.getNode(ISD::SUB, dl, VT, Tmp2, Tmp1);
    } else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SREM_I8,
                              RTLIB::SREM_I16, RTLIB::SREM_I32,
                              RTLIB::SREM_I64, RTLIB::SREM_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UREM_I8,
                              RTLIB::UREM_I16, RTLIB::UREM_I32,
                              RTLIB::UREM_I64, RTLIB::UREM_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::UDIV:
  case ISD::SDIV: {
    bool isSigned = Node->getOpcode() == ISD::SDIV;
    unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) ||
        (isDivRemLibcallAvailable(Node, isSigned, TLI) &&
         UseDivRem(Node, isSigned, true)))
      Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Node->getOperand(0),
                         Node->getOperand(1));
    else if (isSigned)
      Tmp1 = ExpandIntLibCall(Node, true,
                              RTLIB::SDIV_I8,
                              RTLIB::SDIV_I16, RTLIB::SDIV_I32,
                              RTLIB::SDIV_I64, RTLIB::SDIV_I128);
    else
      Tmp1 = ExpandIntLibCall(Node, false,
                              RTLIB::UDIV_I8,
                              RTLIB::UDIV_I16, RTLIB::UDIV_I32,
                              RTLIB::UDIV_I64, RTLIB::UDIV_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::MULHU:
  case ISD::MULHS: {
    unsigned ExpandOpcode = Node->getOpcode() == ISD::MULHU ? ISD::UMUL_LOHI :
                                                              ISD::SMUL_LOHI;
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    assert(TLI.isOperationLegalOrCustom(ExpandOpcode, VT) &&
           "If this wasn't legal, it shouldn't have been created!");
    Tmp1 = DAG.getNode(ExpandOpcode, dl, VTs, Node->getOperand(0),
                       Node->getOperand(1));
    Results.push_back(Tmp1.getValue(1));
    break;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    // Expand into divrem libcall
    ExpandDivRemLibCall(Node, Results);
    break;
  case ISD::MUL: {
    EVT VT = Node->getValueType(0);
    SDVTList VTs = DAG.getVTList(VT, VT);
    // See if multiply or divide can be lowered using two-result operations.
    // We just need the low half of the multiply; try both the signed
    // and unsigned forms. If the target supports both SMUL_LOHI and
    // UMUL_LOHI, form a preference by checking which forms of plain
    // MULH it supports.
    bool HasSMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::SMUL_LOHI, VT);
    bool HasUMUL_LOHI = TLI.isOperationLegalOrCustom(ISD::UMUL_LOHI, VT);
    bool HasMULHS = TLI.isOperationLegalOrCustom(ISD::MULHS, VT);
    bool HasMULHU = TLI.isOperationLegalOrCustom(ISD::MULHU, VT);
    unsigned OpToUse = 0;
    if (HasSMUL_LOHI && !HasMULHS) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI && !HasMULHU) {
      OpToUse = ISD::UMUL_LOHI;
    } else if (HasSMUL_LOHI) {
      OpToUse = ISD::SMUL_LOHI;
    } else if (HasUMUL_LOHI) {
      OpToUse = ISD::UMUL_LOHI;
    }
    if (OpToUse) {
      Results.push_back(DAG.getNode(OpToUse, dl, VTs, Node->getOperand(0),
                                    Node->getOperand(1)));
      break;
    }
    Tmp1 = ExpandIntLibCall(Node, false,
                            RTLIB::MUL_I8,
                            RTLIB::MUL_I16, RTLIB::MUL_I32,
                            RTLIB::MUL_I64, RTLIB::MUL_I128);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SADDO:
  case ISD::SSUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::SADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    EVT OType = Node->getValueType(1);

    SDValue Zero = DAG.getConstant(0, LHS.getValueType());

    //   LHSSign -> LHS >= 0
    //   RHSSign -> RHS >= 0
    //   SumSign -> Sum >= 0
    //
    //   Add:
    //   Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
    //   Sub:
    //   Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
    //
    SDValue LHSSign = DAG.getSetCC(dl, OType, LHS, Zero, ISD::SETGE);
    SDValue RHSSign = DAG.getSetCC(dl, OType, RHS, Zero, ISD::SETGE);
    SDValue SignsMatch = DAG.getSetCC(dl, OType, LHSSign, RHSSign,
                                      Node->getOpcode() == ISD::SADDO ?
                                      ISD::SETEQ : ISD::SETNE);

    SDValue SumSign = DAG.getSetCC(dl, OType, Sum, Zero, ISD::SETGE);
    SDValue SumSignNE = DAG.getSetCC(dl, OType, LHSSign, SumSign, ISD::SETNE);

    SDValue Cmp = DAG.getNode(ISD::AND, dl, OType, SignsMatch, SumSignNE);
    Results.push_back(Cmp);
    break;
  }
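  // Illustrative note: for signed addition the overflow test above fires when
  // the operands have the same sign but the sum does not. E.g. for i8,
  // 100 + 100 wraps to -56: both inputs are non-negative, the sum is
  // negative, so (LHSSign == RHSSign) && (LHSSign != SumSign) holds.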
  case ISD::UADDO:
  case ISD::USUBO: {
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue Sum = DAG.getNode(Node->getOpcode() == ISD::UADDO ?
                              ISD::ADD : ISD::SUB, dl, LHS.getValueType(),
                              LHS, RHS);
    Results.push_back(Sum);
    Results.push_back(DAG.getSetCC(dl, Node->getValueType(1), Sum, LHS,
                                   Node->getOpcode() == ISD::UADDO ?
                                   ISD::SETULT : ISD::SETUGT));
    break;
  }
  case ISD::UMULO:
  case ISD::SMULO: {
    EVT VT = Node->getValueType(0);
    EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() * 2);
    SDValue LHS = Node->getOperand(0);
    SDValue RHS = Node->getOperand(1);
    SDValue BottomHalf;
    SDValue TopHalf;
    static const unsigned Ops[2][3] =
        { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
          { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
    bool isSigned = Node->getOpcode() == ISD::SMULO;
    if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
      BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
      TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
    } else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
      BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                               RHS);
      TopHalf = BottomHalf.getValue(1);
    } else if (TLI.isTypeLegal(EVT::getIntegerVT(*DAG.getContext(),
                                                 VT.getSizeInBits() * 2))) {
      LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
      RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
      Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
                            DAG.getIntPtrConstant(1));
    } else {
      // We can fall back to a libcall with an illegal type for the MUL if we
      // have a libcall big enough.
      // Also, we can fall back to a division in some cases, but that's a big
      // performance hit in the general case.
      RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
      if (WideVT == MVT::i16)
        LC = RTLIB::MUL_I16;
      else if (WideVT == MVT::i32)
        LC = RTLIB::MUL_I32;
      else if (WideVT == MVT::i64)
        LC = RTLIB::MUL_I64;
      else if (WideVT == MVT::i128)
        LC = RTLIB::MUL_I128;
      assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

      // The high part is obtained by SRA'ing all but one of the bits of low
      // part.
      unsigned LoSize = VT.getSizeInBits();
      SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, RHS,
                                  DAG.getConstant(LoSize-1, TLI.getPointerTy()));
      SDValue HiRHS = DAG.getNode(ISD::SRA, dl, VT, LHS,
                                  DAG.getConstant(LoSize-1, TLI.getPointerTy()));

      // Here we're passing the 2 arguments explicitly as 4 arguments that are
      // pre-lowered to the correct types. This all depends upon WideVT not
      // being a legal type for the architecture and thus has to be split to
      // two arguments.
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      SDValue Ret = ExpandLibCall(LC, WideVT, Args, 4, isSigned, dl);
      BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                               DAG.getIntPtrConstant(0));
      TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Ret,
                            DAG.getIntPtrConstant(1));
    }
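    // Illustrative note: whichever path produced BottomHalf/TopHalf, the code
    // below detects overflow the same way: for SMULO the top half must equal
    // the sign-replication of the bottom half (BottomHalf >>s (bits-1)), and
    // for UMULO it must be zero; any other top half means the product did not
    // fit in VT.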
    if (isSigned) {
      Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1,
                             TLI.getShiftAmountTy(BottomHalf.getValueType()));
      Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
                             ISD::SETNE);
    } else {
      TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
                             DAG.getConstant(0, VT), ISD::SETNE);
    }
    Results.push_back(BottomHalf);
    Results.push_back(TopHalf);
    break;
  }
  case ISD::BUILD_PAIR: {
    EVT PairTy = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::ANY_EXTEND, dl, PairTy, Node->getOperand(1));
    Tmp2 = DAG.getNode(ISD::SHL, dl, PairTy, Tmp2,
                       DAG.getConstant(PairTy.getSizeInBits()/2,
                                       TLI.getShiftAmountTy(PairTy)));
    Results.push_back(DAG.getNode(ISD::OR, dl, PairTy, Tmp1, Tmp2));
    break;
  }
  case ISD::SELECT:
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    if (Tmp1.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getSelectCC(dl, Tmp1.getOperand(0), Tmp1.getOperand(1),
                             Tmp2, Tmp3,
                             cast<CondCodeSDNode>(Tmp1.getOperand(2))->get());
    } else {
      Tmp1 = DAG.getSelectCC(dl, Tmp1,
                             DAG.getConstant(0, Tmp1.getValueType()),
                             Tmp2, Tmp3, ISD::SETNE);
    }
    Results.push_back(Tmp1);
    break;
  case ISD::BR_JT: {
    SDValue Chain = Node->getOperand(0);
    SDValue Table = Node->getOperand(1);
    SDValue Index = Node->getOperand(2);

    EVT PTy = TLI.getPointerTy();

    const TargetData &TD = *TLI.getTargetData();
    unsigned EntrySize =
      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);

    Index = DAG.getNode(ISD::MUL, dl, PTy,
                        Index, DAG.getConstant(EntrySize, PTy));
    SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);

    EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
    SDValue LD = DAG.getExtLoad(ISD::SEXTLOAD, dl, PTy, Chain, Addr,
                                MachinePointerInfo::getJumpTable(), MemVT,
                                false, false, 0);
    Addr = LD;
    if (TM.getRelocationModel() == Reloc::PIC_) {
      // For PIC, the sequence is:
      // BRIND(load(Jumptable + index) + RelocBase)
      // RelocBase can be JumpTable, GOT or some sort of global base.
      Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
                         TLI.getPICJumpTableRelocBase(Table, DAG));
    }
    Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BRCOND:
    // Expand brcond's setcc into its constituent parts and create a BR_CC
    // Node.
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    if (Tmp2.getOpcode() == ISD::SETCC) {
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
                         Tmp1, Tmp2.getOperand(2),
                         Tmp2.getOperand(0), Tmp2.getOperand(1),
                         Node->getOperand(2));
    } else {
      // We test only the i1 bit.  Skip the AND if UNDEF.
      Tmp3 = (Tmp2.getOpcode() == ISD::UNDEF) ? Tmp2 :
             DAG.getNode(ISD::AND, dl, Tmp2.getValueType(), Tmp2,
                         DAG.getConstant(1, Tmp2.getValueType()));
      Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
                         DAG.getCondCode(ISD::SETNE), Tmp3,
                         DAG.getConstant(0, Tmp3.getValueType()),
                         Node->getOperand(2));
    }
    Results.push_back(Tmp1);
    break;
  case ISD::SETCC: {
    Tmp1 = Node->getOperand(0);
    Tmp2 = Node->getOperand(1);
    Tmp3 = Node->getOperand(2);
    LegalizeSetCCCondCode(Node->getValueType(0), Tmp1, Tmp2, Tmp3, dl);

    // If we expanded the SETCC into an AND/OR, return the new node
    if (Tmp2.getNode() == 0) {
      Results.push_back(Tmp1);
      break;
    }

    // Otherwise, SETCC for the given comparison type must be completely
    // illegal; expand it into a SELECT_CC.
    EVT VT = Node->getValueType(0);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, VT, Tmp1, Tmp2,
                       DAG.getConstant(1, VT), DAG.getConstant(0, VT), Tmp3);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SELECT_CC: {
    Tmp1 = Node->getOperand(0);   // LHS
    Tmp2 = Node->getOperand(1);   // RHS
    Tmp3 = Node->getOperand(2);   // True
    Tmp4 = Node->getOperand(3);   // False
    SDValue CC = Node->getOperand(4);

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp1.getValueType()),
                          Tmp1, Tmp2, CC, dl);

    assert(!Tmp2.getNode() && "Can't legalize SELECT_CC with legal condition!");
    Tmp2 = DAG.getConstant(0, Tmp1.getValueType());
    CC = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::SELECT_CC, dl, Node->getValueType(0), Tmp1, Tmp2,
                       Tmp3, Tmp4, CC);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::BR_CC: {
    Tmp1 = Node->getOperand(0);   // Chain
    Tmp2 = Node->getOperand(2);   // LHS
    Tmp3 = Node->getOperand(3);   // RHS
    Tmp4 = Node->getOperand(1);   // CC

    LegalizeSetCCCondCode(TLI.getSetCCResultType(Tmp2.getValueType()),
                          Tmp2, Tmp3, Tmp4, dl);
    assert(LastCALLSEQ.size() == 1 && "branch inside CALLSEQ_BEGIN/END?");
    setLastCALLSEQ(DAG.getEntryNode());

    assert(!Tmp3.getNode() && "Can't legalize BR_CC with legal condition!");
    Tmp3 = DAG.getConstant(0, Tmp2.getValueType());
    Tmp4 = DAG.getCondCode(ISD::SETNE);
    Tmp1 = DAG.getNode(ISD::BR_CC, dl, Node->getValueType(0), Tmp1, Tmp4, Tmp2,
                       Tmp3, Node->getOperand(4));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::GLOBAL_OFFSET_TABLE:
  case ISD::GlobalAddress:
  case ISD::GlobalTLSAddress:
  case ISD::ExternalSymbol:
  case ISD::ConstantPool:
  case ISD::JumpTable:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID:
    // FIXME: Custom lowering for these operations shouldn't return null!
    for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i)
      Results.push_back(SDValue(Node, i));
    break;
  }
}
void SelectionDAGLegalize::PromoteNode(SDNode *Node,
                                       SmallVectorImpl<SDValue> &Results) {
  EVT OVT = Node->getValueType(0);
  if (Node->getOpcode() == ISD::UINT_TO_FP ||
      Node->getOpcode() == ISD::SINT_TO_FP ||
      Node->getOpcode() == ISD::SETCC) {
    OVT = Node->getOperand(0).getValueType();
  }
  EVT NVT = TLI.getTypeToPromoteTo(Node->getOpcode(), OVT);
  DebugLoc dl = Node->getDebugLoc();
  SDValue Tmp1, Tmp2, Tmp3;
  switch (Node->getOpcode()) {
  case ISD::CTTZ:
  case ISD::CTLZ:
  case ISD::CTPOP:
    // Zero extend the argument.
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    // Perform the larger operation.
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1);
    if (Node->getOpcode() == ISD::CTTZ) {
      // if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
      Tmp2 = DAG.getSetCC(dl, TLI.getSetCCResultType(NVT),
                          Tmp1, DAG.getConstant(NVT.getSizeInBits(), NVT),
                          ISD::SETEQ);
      Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp2,
                         DAG.getConstant(OVT.getSizeInBits(), NVT), Tmp1);
    } else if (Node->getOpcode() == ISD::CTLZ) {
      // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
      Tmp1 = DAG.getNode(ISD::SUB, dl, NVT, Tmp1,
                         DAG.getConstant(NVT.getSizeInBits() -
                                         OVT.getSizeInBits(), NVT));
    }
    Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, OVT, Tmp1));
    break;
  case ISD::BSWAP: {
    unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits();
    Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0));
    Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1);
    Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1,
                       DAG.getConstant(DiffBits, TLI.getShiftAmountTy(NVT)));
    Results.push_back(Tmp1);
    break;
  }
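  // Illustrative note: a BSWAP promoted from i16 to i32 leaves the swapped
  // bytes in the upper half, so the SRL above shifts them back down by
  // DiffBits. E.g. bswap32(zext(0xAABB)) = 0xBBAA0000, and >> 16 gives
  // 0xBBAA, the correct i16 byte swap.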
  case ISD::FP_TO_UINT:
  case ISD::FP_TO_SINT:
    Tmp1 = PromoteLegalFP_TO_INT(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::FP_TO_SINT, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
    Tmp1 = PromoteLegalINT_TO_FP(Node->getOperand(0), Node->getValueType(0),
                                 Node->getOpcode() == ISD::SINT_TO_FP, dl);
    Results.push_back(Tmp1);
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    unsigned ExtOp, TruncOp;
    if (OVT.isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else {
      assert(OVT.isInteger() && "Cannot promote logic operation");
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    }
    // Promote each of the values to the new type.
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    // Perform the larger operation, then convert back
    Tmp1 = DAG.getNode(Node->getOpcode(), dl, NVT, Tmp1, Tmp2);
    Results.push_back(DAG.getNode(TruncOp, dl, OVT, Tmp1));
    break;
  }
  case ISD::SELECT: {
    unsigned ExtOp, TruncOp;
    if (Node->getValueType(0).isVector()) {
      ExtOp   = ISD::BITCAST;
      TruncOp = ISD::BITCAST;
    } else if (Node->getValueType(0).isInteger()) {
      ExtOp   = ISD::ANY_EXTEND;
      TruncOp = ISD::TRUNCATE;
    } else {
      ExtOp   = ISD::FP_EXTEND;
      TruncOp = ISD::FP_ROUND;
    }
    Tmp1 = Node->getOperand(0);
    // Promote each of the values to the new type.
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Tmp3 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(2));
    // Perform the larger operation, then round down.
    Tmp1 = DAG.getNode(ISD::SELECT, dl, NVT, Tmp1, Tmp2, Tmp3);
    if (TruncOp != ISD::FP_ROUND)
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1);
    else
      Tmp1 = DAG.getNode(TruncOp, dl, Node->getValueType(0), Tmp1,
                         DAG.getIntPtrConstant(0));
    Results.push_back(Tmp1);
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    SmallVector<int, 8> Mask;
    cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

    // Cast the two input vectors.
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

    // Convert the shuffle mask to the right # elements.
    Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
    Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
    Results.push_back(Tmp1);
    break;
  }
  case ISD::SETCC: {
    unsigned ExtOp = ISD::FP_EXTEND;
    if (NVT.isInteger()) {
      ISD::CondCode CCCode =
        cast<CondCodeSDNode>(Node->getOperand(2))->get();
      ExtOp = isSignedIntSetCC(CCCode) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    }
    Tmp1 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(0));
    Tmp2 = DAG.getNode(ExtOp, dl, NVT, Node->getOperand(1));
    Results.push_back(DAG.getNode(ISD::SETCC, dl, Node->getValueType(0),
                                  Tmp1, Tmp2, Node->getOperand(2)));
    break;
  }
  }
}
// SelectionDAG::Legalize - This is the entry point for the file.
//
void SelectionDAG::Legalize() {
  /// run - This is the main entry point to this class.
  SelectionDAGLegalize(*this).LegalizeDAG();
}