//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling as it
// is a parameter to a function or a return value from a function which is not
// an intrinsic.
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or an
    // indirect function call in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
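      // E.g. NumParts == 3 is not a power of 2, so RoundParts becomes
      // 1 << Log2_32(3) == 2, the largest power of 2 not exceeding NumParts;
      // the leftover odd part is assembled and merged in below.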
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
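        // The odd high part is any-extended to the combined width, shifted
        // up past the round bits, and OR'd in: e.g. three i32 parts form an
        // i96 with Lo holding parts 0-1 and Hi holding part 2 shifted left
        // by 64 bits.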
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element types of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // that are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
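    // E.g. for NumParts == 3, the single high (odd) part is shifted down
    // and copied out first; the remaining two parts are then bisected by
    // the power-of-2 expansion below.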
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
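  // Each pass of the outer loop halves the piece width, e.g. 4 parts of an
  // i128 value go i128 -> 2 x i64 -> 4 x i32 over two iterations.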
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                           DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
    } else {
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, IdxVT));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;
  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }

      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}
SelectionDAGBuilder::getMemoryRoot() {
1072 return updateRoot(PendingLoads
);
1075 SDValue
SelectionDAGBuilder::getRoot() {
1076 // Chain up all pending constrained intrinsics together with all
1077 // pending loads, by simply appending them to PendingLoads and
1078 // then calling getMemoryRoot().
1079 PendingLoads
.reserve(PendingLoads
.size() +
1080 PendingConstrainedFP
.size() +
1081 PendingConstrainedFPStrict
.size());
1082 PendingLoads
.append(PendingConstrainedFP
.begin(),
1083 PendingConstrainedFP
.end());
1084 PendingLoads
.append(PendingConstrainedFPStrict
.begin(),
1085 PendingConstrainedFPStrict
.end());
1086 PendingConstrainedFP
.clear();
1087 PendingConstrainedFPStrict
.clear();
1088 return getMemoryRoot();
1091 SDValue
SelectionDAGBuilder::getControlRoot() {
1092 // We need to emit pending fpexcept.strict constrained intrinsics,
1093 // so append them to the PendingExports list.
1094 PendingExports
.append(PendingConstrainedFPStrict
.begin(),
1095 PendingConstrainedFPStrict
.end());
1096 PendingConstrainedFPStrict
.clear();
1097 return updateRoot(PendingExports
);

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }
  // Constrained FP intrinsics with fpexcept.ignore should also get
  // the NoFPExcept flag.
  if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(&I))
    if (FPI->getExceptionBehavior() == fp::ExceptionBehavior::ebIgnore)
      if (SDNode *Node = getNodeForIRValue(&I)) {
        SDNodeFlags Flags = Node->getFlags();
        Flags.setNoFPExcept(true);
        Node->setFlags(Flags);
      }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
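    // For example, the Instruction.def entry for Add expands here to:
    //   case Instruction::Add: visitAdd((const BinaryOperator&)I); break;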
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that prove this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << "in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
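  // E.g. a dangling dbg.value of %a, where "%a = add i32 %b, 4", can be
  // rewritten as a dbg.value of %b with DW_OP_plus_uconst 4 appended to the
  // expression, and the loop then retries with %b.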
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
                        << DDI.getDI() << "\nBy stripping back to:\n  " << V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << DDI.getDI()
                    << "\n");
  LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
                    << "\n");
}

bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

  // Special rules apply for the first dbg.values of parameter variables in a
  // function. Identify them by the fact they reference Argument Values, that
  // they're parameters, and they are parameters of the current function. We
  // need to let them dangle until they get an SDNode.
  bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
                       !InstDL.getInlinedAt();
  if (!IsParamOfFunc) {
    // The value is not used in this block yet (or it would have an SDNode).
    // We still want the value to appear for the user if possible -- if it has
    // an associated VReg, we can refer to that instead.
    auto VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      unsigned Reg = VMI->second;
      // If this is a PHI node, it may be split up into several MI PHI nodes
      // (in FunctionLoweringInfo::set).
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                       V->getType(), None);
      if (RFV.occupiesMultipleRegs()) {
        unsigned Offset = 0;
        unsigned BitsToDescribe = 0;
        if (auto VarSize = Var->getSizeInBits())
          BitsToDescribe = *VarSize;
        if (auto Fragment = Expr->getFragmentInfo())
          BitsToDescribe = Fragment->SizeInBits;
        for (auto RegAndSize : RFV.getRegsAndSizes()) {
          unsigned RegisterSize = RegAndSize.second;
          // Bail out if all bits are described already.
          if (Offset >= BitsToDescribe)
            break;
          unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
              ? BitsToDescribe - Offset
              : RegisterSize;
          auto FragmentExpr = DIExpression::createFragmentExpression(
              Expr, Offset, FragmentSize);
          if (!FragmentExpr)
            continue;
          SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
                                    false, dl, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          Offset += RegisterSize;
        }
      } else {
        SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, nullptr, false);
      }
      return true;
    }
  }

  return false;
}

void SelectionDAGBuilder::resolveOrClearDbgInfo() {
  // Try to fixup any remaining dangling debug info -- and drop it if we can't.
  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
      salvageUnresolvedDbgValue(DDI);
  clearDanglingDebugInfo();
}

/// getCopyFromRegs - If there was a virtual register allocated for the value V
/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue already exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
         (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}
/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
            dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);

    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType(), getABIRegCopyCC(V));
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}
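// Illustrative example (assumed IR, not from the source): lowering the
// constant { i32 1, [2 x float] [float 2.0, float 3.0] } takes the
// ConstantStruct/ConstantArray path above, flattening the aggregate into the
// leaf values (i32 1, f32 2.0, f32 3.0) and wrapping them in a single
// MERGE_VALUES node whose result types mirror what ComputeValueVTs returns
// for the aggregate type.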
void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  if (!IsSEH)
    CatchPadMBB->setIsEHScopeEntry();
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();
  // Wasm does not need catchpads anymore
  if (!IsWasmCXX)
    DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
                            getControlRoot()));
}
void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}
void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of an EH scope/funclet.
  FuncInfo.MBB->setIsEHScopeEntry();
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  if (Pers != EHPersonality::Wasm_CXX) {
    FuncInfo.MBB->setIsEHFuncletEntry();
    FuncInfo.MBB->setIsCleanupFuncletEntry();
  }
}
// For wasm, there's always a single catch pad attached to a catchswitch, and
// the control flow always stops at the single catch pad, as it does for a
// cleanup pad. In case the exception caught is not of the types the catch pad
// catches, it will be rethrown by a rethrow.
static void findWasmUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations. We don't
      // continue to the unwind destination of the catchswitch for wasm.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        UnwindDests.back().first->setIsEHScopeEntry();
      }
      break;
    } else {
      continue;
    }
  }
}
/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");
    return;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}
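// Worked example (assumed CFG and probabilities): if an invoke unwinds to a
// catchswitch with two catchpad handlers whose own unwind destination is a
// cleanuppad, and the edge probability into the catchswitch is 1/2, both
// handler MBBs are recorded with probability 1/2; if BPI gives the
// catchswitch-to-cleanup edge probability 1/4, the loop then records the
// cleanup pad with 1/2 * 1/4 = 1/8 before terminating.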
void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}
void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL,
                    F->getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs, MemVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
                    &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      // An aggregate return value cannot wrap around the address space, so
      // offsets to its parts don't wrap either.
      SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);

      SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
      if (MemVTs[i] != ValueVTs[i])
        Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(), Val,
          // FIXME: better loc info would be nice.
          Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
          I.getOperand(0)->getType(), F->getCallingConv(),
          /*IsVarArg*/ false);

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                               Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      LLVMContext &Context = F->getContext();
      bool RetInReg = F->getAttributes().hasAttribute(
          AttributeList::ReturnIndex, Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);

        CallingConv::ID CC = F->getCallingConv();

        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (RetInReg)
          Flags.setInReg();

        if (I.getOperand(0)->getType()->isPointerTy()) {
          Flags.setPointer();
          Flags.setPointerAddrSpace(
              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
        }

        if (NeedsRegBlock) {
          Flags.setInConsecutiveRegs();
          if (j == NumValues - 1)
            Flags.setInConsecutiveRegsLast();
        }

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  // Push in swifterror virtual register as the last element of Outs. This makes
  // sure swifterror virtual register will be returned in the swifterror
  // physical register.
  const Function *F = I.getParent()->getParent();
  if (TLI.supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    assert(SwiftError.getFunctionArg() && "Need a swift error argument");
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    Flags.setSwiftError();
    Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
                                  EVT(TLI.getPointerTy(DL)) /*argvt*/,
                                  true /*isfixed*/, 1 /*origidx*/,
                                  0 /*partOffs*/));
    // Create SDNode for the swifterror virtual register.
    OutVals.push_back(
        DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
                            &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
                        EVT(TLI.getPointerTy(DL))));
  }

  bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction().getCallingConv();
  Chain = DAG.getTargetLoweringInfo().LowerReturn(
      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}
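// Illustrative sketch of the !CanLowerReturn ("sret demotion") path above,
// under an assumed target: a function returning {i64, i64} that cannot be
// returned in registers receives a hidden result pointer in DemoteRegister,
// and 'ret {i64, i64} %agg' lowers to two stores through that pointer (at
// offsets 0 and 8) joined by a TokenFactor; Outs stays empty so the target's
// LowerReturn emits no register copies for the value.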
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
  // Skip empty types.
  if (V->getType()->isEmptyTy())
    return;

  DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
  if (VMI != FuncInfo.ValueMap.end()) {
    assert(!V->use_empty() && "Unused value assigned virtual registers!");
    CopyValueToVirtualRegister(V, VMI->second);
  }
}
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}
bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
                                                     const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}
/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
BranchProbability
SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
                                        const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return BPI->getEdgeProbability(SrcBB, DstBB);
}
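// Worked example (assumed numbers, for illustration only): without BPI, a
// source block with four IR successors yields BranchProbability(1, 4), i.e.
// 25% per edge; the std::max clamp above guards the degenerate case of a
// block with no successors, dividing by 1 instead of 0.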
void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
                                               MachineBasicBlock *Dst,
                                               BranchProbability Prob) {
  if (!FuncInfo.BPI)
    Src->addSuccessorWithoutProb(Dst);
  else {
    if (Prob.isUnknown())
      Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);
  }
}
static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  BranchProbability TProb,
                                                  BranchProbability FProb,
                                                  bool InvertCond) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        ICmpInst::Predicate Pred =
            InvertCond ? IC->getInversePredicate() : IC->getPredicate();
        Condition = getICmpCondCode(Pred);
      } else {
        const FCmpInst *FC = cast<FCmpInst>(Cond);
        FCmpInst::Predicate Pred =
            InvertCond ? FC->getInversePredicate() : FC->getPredicate();
        Condition = getFCmpCondCode(Pred);
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
      SL->SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               Instruction::BinaryOps Opc,
                                               BranchProbability TProb,
                                               BranchProbability FProb,
                                               bool InvertCond) {
  // Skip over a NOT that is not part of the tree and remember to invert the
  // op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      InBlock(NotCond, CurBB->getBasicBlock())) {
    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  unsigned BOpc = 0;
  if (BOp) {
    BOpc = BOp->getOpcode();
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TProb, FProb, InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  }
}
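// Worked example for the OR case above (assumed input probabilities): with
// TProb = A = 1/4 and FProb = B = 3/4, the LHS block gets TrueProb A/2 = 1/8
// and FalseProb A/2 + B = 7/8, and normalizing {A/2, B} = {1/8, 3/4} gives
// TmpBB the probabilities A/(1+B) = 1/7 and 2B/(1+B) = 6/7. The identity
// 1/8 + (7/8 * 1/7) = 1/4 recovers the original TrueProb, as required.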
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
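// Illustrative IR for the null-check folds above (assumed example):
//   %a = icmp eq i8* %X, null
//   %b = icmp eq i8* %Y, null
//   %c = and i1 %a, %b
// Both case blocks compare against the same null constant with SETEQ, so
// returning false here lets the two branches be merged and later folded into
// the equivalent of (X | Y) == 0, a single compare-and-branch.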
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    BrMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive, this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    Instruction::BinaryOps Opcode = BOp->getOpcode();
    if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
        !I.hasMetadata(LLVMContext::MD_unpredictable) &&
        (Opcode == Instruction::And || Opcode == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
                           Opcode,
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
                           /*InvertCond=*/false);
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SL->SwitchCases)) {
        for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SL->SwitchCases[0], BrMBB);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
        FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());

  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB, BrMBB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
                                          MachineBasicBlock *SwitchBB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  SDLoc dl = getCurSDLoc();

  if (CB.CC == ISD::SETTRUE) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
    SwitchBB->normalizeSuccProbs();
    if (CB.TrueBB != NextBlock(SwitchBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(CB.TrueBB)));
    }
    return;
  }

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());

  // Build the setcc now.
  if (!CB.CmpMHS) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else {
      SDValue CondRHS = getValue(CB.CmpRHS);

      // If a pointer's DAG type is larger than its memory type then the DAG
      // values are zero-extended. This breaks signed comparisons so truncate
      // back to the underlying type before doing the compare.
      if (CondLHS.getValueType() != MemVT) {
        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
      }
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
    }
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
    }
  }

  // Update successor info
  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
  SwitchBB->normalizeSuccProbs();

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock(SwitchBB)) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // Insert the false branch. Do this even if it's a fall through branch,
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}
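// Worked example of the range test above (assumed case range): for a case
// cluster covering [10, 20], Low = 10 and High = 20, so the emitted code is
//   SUB = CmpOp - 10;  Cond = SUB ule 10
// A switched value of 15 gives 5 ule 10 -> true, while 9 wraps around to a
// huge unsigned value and fails, so one unsigned compare replaces the signed
// pair 10 <= x && x <= 20.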
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
                                    MVT::Other, Index.getValue(1),
                                    Table, Index);
  DAG.setRoot(BrJumpTable);
}
/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));

  unsigned JumpTableReg =
      FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  if (!JTH.OmitRangeCheck) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}
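// Illustrative lowering (assumed switch, not from the source): for
// 'switch i32 %x' with contiguous cases 100..103, JTH.First is 100, so the
// header computes %x - 100, zero-extends or truncates it to pointer width,
// and (unless OmitRangeCheck) compares it against JTH.Last - JTH.First = 3
// with SETUGT, branching to the default block on overflow before the BR_JT
// in visitJumpTable indexes the four-entry table.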
/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue &Chain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
  MachineSDNode *Node =
      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
  if (Global) {
    MachinePointerInfo MPInfo(Global);
    auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                 MachineMemOperand::MODereferenceable;
    MachineMemOperand *MemRef = MF.getMachineMemOperand(
        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
    DAG.setNodeMemRefs(Node, {MemRef});
  }
  if (PtrTy != PtrMemTy)
    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
  return SDValue(Node, 0);
}
/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                                  MachineBasicBlock *ParentBB) {
  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  SDValue Guard;
  SDLoc dl = getCurSDLoc();
  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));

  // Generate code to load the content of the guard slot.
  SDValue GuardVal = DAG.getLoad(
      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
      MachineMemOperand::MOVolatile);

  if (TLI.useStackGuardXorFP())
    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode()) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    SDValue GuardPtr = getValue(IRGuard);

    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                        MachinePointerInfo(IRGuard, 0), Align,
                        MachineMemOperand::MOVolatile);
  }

  // Perform the comparison via a subtract/getsetcc.
  EVT VT = Guard.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);

  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
                                                        *DAG.getContext(),
                                                        Sub.getValueType()),
                             Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);

  // If the sub is not 0, then we know the guard/stackslot do not equal, so
  // branch to failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, GuardVal.getOperand(0),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}
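// Rough shape of the DAG built above when no guard check function is provided
// (a sketch, not actual target output):
//   GuardVal = volatile load of the stack protector slot (frame index FI)
//   Guard    = LOAD_STACK_GUARD node, or a volatile load of the guard variable
//   brcond ((Guard - GuardVal) setne 0) -> FailureMBB
//   br SuccessMBB
// The comparison is phrased as subtract-then-setcc, matching the code above.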
/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setDiscardResult(true);
  SDValue Chain =
      TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                      None, CallOptions, getCurSDLoc()).second;
  // On PS4, the "return address" must still be within the calling function,
  // even if it's at the very end, so emit an explicit TRAP here.
  // Passing 'true' for doesNotReturn above won't generate the trap for us.
  if (TM.getTargetTriple().isPS4CPU())
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}
/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
                                             MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue RangeSub =
      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));

  // Determine the type of the test operands.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch table case ranges are encoded into series of masks.
        // Just use pointer type, it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  SDValue Sub = RangeSub;
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue Root = CopyTo;
  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    SDValue RangeCmp = DAG.getSetCC(dl,
        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                               RangeSub.getValueType()),
        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
        ISD::SETUGT);

    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
                       DAG.getBasicBlock(B.Default));
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));

  DAG.setRoot(Root);
}
/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
                                           MachineBasicBlock* NextMBB,
                                           BranchProbability BranchProbToNext,
                                           unsigned Reg,
                                           BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
        ISD::SETNE);
  } else {
    // Make desired shift
    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
                                    DAG.getConstant(1, dl, VT), ShiftOp);

    // Emit bit tests and jumps
    SDValue AndOp = DAG.getNode(ISD::AND, dl,
                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}
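// Worked example of the general path above (assumed case values): for cases
// {0, 3, 5} relative to the cluster base, B.Mask = 0b101001. PopCount is 3,
// so neither special case applies, and the emitted test is
//   ((1 << ShiftOp) & 0b101001) != 0
// e.g. a switched value with ShiftOp = 3 produces 0b1000 & 0b101001 != 0,
// which is true, branching to B.TargetBB.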
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      visitPatchpoint(&I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
      break;
    case Intrinsic::wasm_rethrow_in_catch: {
      // This is usually done in visitTargetIntrinsic, but this intrinsic is
      // special because it can be invoked, so we manually lower it to a DAG
      // node here.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(getRoot()); // inchain
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      Ops.push_back(
          DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())));
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
    // Currently we do not lower any intrinsic calls with deopt operand bundles.
    // Eventually we will support lowering the @llvm.experimental.deoptimize
    // intrinsic, and right now there are no plans to support other intrinsics
    // with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else {
    LowerCallTo(&I, getValue(Callee), false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during call to the LowerStatepoint.
  if (!isStatepoint(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");

  assert(isa<InlineAsm>(I.getCalledValue()) &&
         "Only know how to handle inlineasm callbr");
  visitInlineAsm(&I);

  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return);
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
    addSuccessorWithProb(CallBrMBB, Target);
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
  SDValue Ops[2];
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}
void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
                                           MachineBasicBlock *Last) {
  // Update JTCases.
  for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
    if (SL->JTCases[i].first.HeaderBB == First)
      SL->JTCases[i].first.HeaderBB = Last;

  // Update BitTestCases.
  for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
    if (SL->BitTestCases[i].Parent == First)
      SL->BitTestCases[i].Parent = Last;
}
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;

  // Update machine-CFG edges with unique successors.
  SmallSet<BasicBlock*, 32> Done;
  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    BasicBlock *BB = I.getSuccessor(i);
    bool Inserted = Done.insert(BB).second;
    if (!Inserted)
      continue;

    MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
    addSuccessorWithProb(IndirectBrMBB, Succ);
  }
  IndirectBrMBB->normalizeSuccProbs();

  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          getValue(I.getAddress())));
}
void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
  if (!DAG.getTarget().Options.TrapUnreachable)
    return;

  // We may be able to ignore unreachable behind a noreturn call.
  if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *I.getParent();
    if (&I != &BB.front()) {
      BasicBlock::const_iterator PredI =
        std::prev(BasicBlock::const_iterator(&I));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return;
      }
    }
  }

  DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
}
void SelectionDAGBuilder::visitFSub(const User &I) {
  // -0.0 - X --> fneg
  Type *Ty = I.getType();
  if (isa<Constant>(I.getOperand(0)) &&
      I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
    SDValue Op2 = getValue(I.getOperand(1));
    setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
                             Op2.getValueType(), Op2));
    return;
  }

  visitBinary(I, ISD::FSUB);
}
/// Checks if the given instruction performs a vector reduction, in which case
/// we have the freedom to alter the elements in the result as long as the
/// reduction of them stays unchanged.
static bool isVectorReductionOp(const User *I) {
  const Instruction *Inst = dyn_cast<Instruction>(I);
  if (!Inst || !Inst->getType()->isVectorTy())
    return false;

  auto OpCode = Inst->getOpcode();
  switch (OpCode) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    break;
  case Instruction::FAdd:
  case Instruction::FMul:
    if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
      if (FPOp->getFastMathFlags().isFast())
        break;
    LLVM_FALLTHROUGH;
  default:
    return false;
  }

  unsigned ElemNum = Inst->getType()->getVectorNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(ElemNum))
    return false;

  unsigned ElemNumToReduce = ElemNum;

  // Do DFS search on the def-use chain from the given instruction. We only
  // allow four kinds of operations during the search until we reach the
  // instruction that extracts the first element from the vector:
  //
  //   1. The reduction operation of the same opcode as the given instruction.
  //
  //   2. PHI node.
  //
  //   3. ShuffleVector instruction together with a reduction operation that
  //      does a partial reduction.
  //
  //   4. ExtractElement that extracts the first element from the vector, and we
  //      stop searching the def-use chain here.
  //
  // 3 & 4 above perform a reduction on all elements of the vector. We push defs
  // from 1-3 to the stack to continue the DFS. The given instruction is not
  // a reduction operation if we meet any other instructions other than those
  // listed above.

  SmallVector<const User *, 16> UsersToVisit{Inst};
  SmallPtrSet<const User *, 16> Visited;
  bool ReduxExtracted = false;

  while (!UsersToVisit.empty()) {
    auto User = UsersToVisit.back();
    UsersToVisit.pop_back();
    if (!Visited.insert(User).second)
      continue;

    for (const auto *U : User->users()) {
      auto Inst = dyn_cast<Instruction>(U);
      if (!Inst)
        return false;

      if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
        if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
          if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
            return false;
        UsersToVisit.push_back(U);
      } else if (const ShuffleVectorInst *ShufInst =
                     dyn_cast<ShuffleVectorInst>(U)) {
        // Detect the following pattern: A ShuffleVector instruction together
        // with a reduction that does a partial reduction on the first and
        // second ElemNumToReduce / 2 elements, and stores the result in
        // ElemNumToReduce / 2 elements in another vector.

        unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
        if (ResultElements < ElemNum)
          return false;

        if (ElemNumToReduce == 1)
          return false;
        if (!isa<UndefValue>(U->getOperand(1)))
          return false;
        for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
          if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
            return false;
        for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
          if (ShufInst->getMaskValue(i) != -1)
            return false;

        // There is only one user of this ShuffleVector instruction, which
        // must be a reduction operation.
        if (!U->hasOneUse())
          return false;

        auto U2 = dyn_cast<Instruction>(*U->user_begin());
        if (!U2 || U2->getOpcode() != OpCode)
          return false;

        // Check operands of the reduction operation.
        if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
            (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
          UsersToVisit.push_back(U2);
          ElemNumToReduce /= 2;
        } else
          return false;
      } else if (isa<ExtractElementInst>(U)) {
        // At this moment we should have reduced all elements in the vector.
        if (ElemNumToReduce != 1)
          return false;

        const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
        if (!Val || !Val->isZero())
          return false;

        ReduxExtracted = true;
      } else
        return false;
    }
  }
  return ReduxExtracted;
}
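// A hedged illustration of the pattern matched above (hypothetical IR, not
// from the original file): a <4 x i32> add reduction written as
//   %s1 = shufflevector <4 x i32> %v, <4 x i32> undef,
//                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
//   %r1 = add <4 x i32> %v, %s1
//   %s2 = shufflevector <4 x i32> %r1, <4 x i32> undef,
//                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
//   %r2 = add <4 x i32> %r1, %s2
//   %res = extractelement <4 x i32> %r2, i32 0
// Each shuffle/add pair halves ElemNumToReduce (4 -> 2 -> 1), and the final
// extractelement of lane 0 sets ReduxExtracted.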
void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;

  SDValue Op = getValue(I.getOperand(0));
  SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
                                    Op, Flags);
  setValue(&I, UnNodeValue);
}
void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  }
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
    Flags.setExact(ExactOp->isExact());
  }
  if (isVectorReductionOp(&I)) {
    Flags.setVectorReduction(true);
    LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");

    // If no flags are set we will propagate the incoming flags, if any flags
    // are set, we will intersect them with the incoming flag and so we need to
    // copy the FMF flags here.
    if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
      Flags.copyFMF(*FPOp);
    }
  }

  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
                                     Op1, Op2, Flags);
  setValue(&I, BinNodeValue);
}
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
      Op1.getValueType(), DAG.getDataLayout());

  // Coerce the shift amount to the right type if we can.
  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
    unsigned ShiftSize = ShiftTy.getSizeInBits();
    unsigned Op2Size = Op2.getValueSizeInBits();
    SDLoc DL = getCurSDLoc();

    // If the operand is smaller than the shift count type, promote it.
    if (ShiftSize > Op2Size)
      Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);

    // If the operand is larger than the shift count type but the shift
    // count type has enough bits to represent any shift value, truncate
    // it now. This is a common case and it exposes the truncate to
    // optimization early.
    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
      Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
    // Otherwise we'll need to temporarily settle for some other convenient
    // type. Type legalization will make adjustments once the shiftee is split.
    else
      Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
  }

  bool nuw = false;
  bool nsw = false;
  bool exact = false;

  if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {

    if (const OverflowingBinaryOperator *OFBinOp =
            dyn_cast<const OverflowingBinaryOperator>(&I)) {
      nuw = OFBinOp->hasNoUnsignedWrap();
      nsw = OFBinOp->hasNoSignedWrap();
    }
    if (const PossiblyExactOperator *ExactOp =
            dyn_cast<const PossiblyExactOperator>(&I))
      exact = ExactOp->isExact();
  }
  SDNodeFlags Flags;
  Flags.setExact(exact);
  Flags.setNoSignedWrap(nsw);
  Flags.setNoUnsignedWrap(nuw);
  SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
                            Flags);
  setValue(&I, Res);
}
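// Illustrative walkthrough (hedged; the exact shift-amount type depends on
// the target): for "shl i64 %a, %b" on a target whose shift-amount type is
// i8, Log2_32_Ceil(64) == 6 fits in 8 bits, so the i64 amount is truncated
// early; conversely "shl i8 %a, %b" on a target with an i32 shift-amount
// type takes the zero-extend path above.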
void SelectionDAGBuilder::visitSDiv(const User &I) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  SDNodeFlags Flags;
  Flags.setExact(isa<PossiblyExactOperator>(&I) &&
                 cast<PossiblyExactOperator>(&I)->isExact());
  setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
                           Op2, Flags));
}
void SelectionDAGBuilder::visitICmp(const User &I) {
  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
    predicate = IC->getPredicate();
  else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
    predicate = ICmpInst::Predicate(IC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = getICmpCondCode(predicate);

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());

  // If a pointer's DAG type is larger than its memory type then the DAG values
  // are zero-extended. This breaks signed comparisons so truncate back to the
  // underlying type before doing the compare.
  if (Op1.getValueType() != MemVT) {
    Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
    Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
  }

  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}
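// Example scenario (hedged; whether it arises depends on the target): on a
// target where pointers live in 64-bit DAG values but are only 32 bits in
// memory (e.g. an ILP32-style ABI on a 64-bit architecture), a signed
// "icmp slt i8* %p, %q" over zero-extended 64-bit values could give the
// wrong answer, so both operands are truncated back to the 32-bit memory
// type before the setcc is built.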
void SelectionDAGBuilder::visitFCmp(const User &I) {
  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
  if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
    predicate = FC->getPredicate();
  else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
    predicate = FCmpInst::Predicate(FC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  ISD::CondCode Condition = getFCmpCondCode(predicate);
  auto *FPMO = dyn_cast<FPMathOperator>(&I);
  if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
    Condition = getFCmpCodeWithoutNaN(Condition);

  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}
// Check if the condition of the select has one use or two users that are both
// selects with the same condition.
static bool hasOnlySelectUsers(const Value *Cond) {
  return llvm::all_of(Cond->users(), [](const Value *V) {
    return isa<SelectInst>(V);
  });
}
void SelectionDAGBuilder::visitSelect(const User &I) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  SmallVector<SDValue, 4> Values(NumValues);
  SDValue Cond   = getValue(I.getOperand(0));
  SDValue LHSVal = getValue(I.getOperand(1));
  SDValue RHSVal = getValue(I.getOperand(2));
  auto BaseOps = {Cond};
  ISD::NodeType OpCode = Cond.getValueType().isVector() ?
    ISD::VSELECT : ISD::SELECT;

  bool IsUnaryAbs = false;

  // Min/max matching is only viable if all output VTs are the same.
  if (is_splat(ValueVTs)) {
    EVT VT = ValueVTs[0];
    LLVMContext &Ctx = *DAG.getContext();
    auto &TLI = DAG.getTargetLoweringInfo();

    // We care about the legality of the operation after it has been type
    // legalized.
    while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
      VT = TLI.getTypeToTransformTo(Ctx, VT);

    // If the vselect is legal, assume we want to leave this as a vector setcc +
    // vselect. Otherwise, if this is going to be scalarized, we want to see if
    // min/max is legal on the scalar type.
    bool UseScalarMinMax = VT.isVector() &&
      !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);

    Value *LHS, *RHS;
    auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
    ISD::NodeType Opc = ISD::DELETED_NODE;
    switch (SPR.Flavor) {
    case SPF_UMAX:    Opc = ISD::UMAX; break;
    case SPF_UMIN:    Opc = ISD::UMIN; break;
    case SPF_SMAX:    Opc = ISD::SMAX; break;
    case SPF_SMIN:    Opc = ISD::SMIN; break;
    case SPF_FMINNUM:
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN:   Opc = ISD::FMINIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
      case SPNB_RETURNS_ANY: {
        if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
          Opc = ISD::FMINNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
          Opc = ISD::FMINIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
            ISD::FMINNUM : ISD::FMINIMUM;
        break;
      }
      }
      break;
    case SPF_FMAXNUM:
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN:   Opc = ISD::FMAXIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
      case SPNB_RETURNS_ANY:
        if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
          Opc = ISD::FMAXNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
          Opc = ISD::FMAXIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
            ISD::FMAXNUM : ISD::FMAXIMUM;
        break;
      }
      break;
    case SPF_ABS:
      IsUnaryAbs = true;
      Opc = ISD::ABS;
      break;
    case SPF_NABS:
      // TODO: we need to produce sub(0, abs(X)).
    default: break;
    }

    if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
        (TLI.isOperationLegalOrCustom(Opc, VT) ||
         (UseScalarMinMax &&
          TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
        // If the underlying comparison instruction is used by any other
        // instruction, the consumed instructions won't be destroyed, so it is
        // not profitable to convert to a min/max.
        hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      RHSVal = getValue(RHS);
      BaseOps = {};
    }

    if (IsUnaryAbs) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      BaseOps = {};
    }
  }

  if (IsUnaryAbs) {
    for (unsigned i = 0; i != NumValues; ++i) {
      Values[i] =
          DAG.getNode(OpCode, getCurSDLoc(),
                      LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
                      SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
    }
  } else {
    for (unsigned i = 0; i != NumValues; ++i) {
      SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
      Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
      Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
      Values[i] = DAG.getNode(
          OpCode, getCurSDLoc(),
          LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
    }
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
}
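// Illustrative example (hypothetical IR, not from the original source): the
// pair
//   %c = icmp sgt i32 %x, %y
//   %m = select i1 %c, i32 %x, i32 %y
// is recognized by matchSelectPattern as SPF_SMAX, so when ISD::SMAX is legal
// for the (type-legalized) value type and %c has no users other than selects,
// the code above emits a single smax node instead of a setcc + select pair.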
void SelectionDAGBuilder::visitTrunc(const User &I) {
  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitZExt(const User &I) {
  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for same reason. So, nothing much to do
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitSExt(const User &I) {
  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for same reason. So, nothing much to do
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPTrunc(const User &I) {
  // FPTrunc is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  SDLoc dl = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
                           DAG.getTargetConstant(
                               0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
}

void SelectionDAGBuilder::visitFPExt(const User &I) {
  // FPExt is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPToUI(const User &I) {
  // FPToUI is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPToSI(const User &I) {
  // FPToSI is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitUIToFP(const User &I) {
  // UIToFP is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitSIToFP(const User &I) {
  // SIToFP is never a no-op cast, no need to check
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitPtrToInt(const User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  auto &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  EVT PtrMemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
  setValue(&I, N);
}

void SelectionDAGBuilder::visitIntToPtr(const User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  auto &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
  setValue(&I, N);
}
void SelectionDAGBuilder::visitBitCast(const User &I) {
  SDValue N = getValue(I.getOperand(0));
  SDLoc dl = getCurSDLoc();
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());

  // BitCast assures us that source and destination are the same size so this is
  // either a BITCAST or a no-op.
  if (DestVT != N.getValueType())
    setValue(&I, DAG.getNode(ISD::BITCAST, dl,
                             DestVT, N)); // convert types.
  // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
  // might fold any kind of constant expression to an integer constant and that
  // is not what we are looking for. Only recognize a bitcast of a genuine
  // constant integer as an opaque constant.
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
    setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
                                 /*isOpaque*/true));
  else
    setValue(&I, N);            // noop cast.
}
void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  SDValue N = getValue(SV);
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());

  unsigned SrcAS = SV->getType()->getPointerAddressSpace();
  unsigned DestAS = I.getType()->getPointerAddressSpace();

  if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
    N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);

  setValue(&I, N);
}
void SelectionDAGBuilder::visitInsertElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InVal = getValue(I.getOperand(1));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InVal, InIdx));
}

void SelectionDAGBuilder::visitExtractElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InIdx));
}
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  SDValue Src1 = getValue(I.getOperand(0));
  SDValue Src2 = getValue(I.getOperand(1));
  Constant *MaskV = cast<Constant>(I.getOperand(2));
  SDLoc DL = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT SrcVT = Src1.getValueType();
  unsigned SrcNumElts = SrcVT.getVectorNumElements();

  if (MaskV->isNullValue() && VT.isScalableVector()) {
    // Canonical splat form of first element of first input vector.
    SDValue FirstElt =
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
                    DAG.getConstant(0, DL,
                                    TLI.getVectorIdxTy(DAG.getDataLayout())));
    setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
    return;
  }

  // For now, we only handle splats for scalable vectors.
  // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
  // for targets that support a SPLAT_VECTOR for non-scalable vector types.
  assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");

  SmallVector<int, 8> Mask;
  ShuffleVectorInst::getShuffleMask(MaskV, Mask);
  unsigned MaskNumElts = Mask.size();

  if (SrcNumElts == MaskNumElts) {
    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
    return;
  }

  // Normalize the shuffle vector since mask and vector length don't match.
  if (SrcNumElts < MaskNumElts) {
    // Mask is longer than the source vectors. We can use concatenate vector to
    // make the mask and vectors lengths match.

    if (MaskNumElts % SrcNumElts == 0) {
      // Mask length is a multiple of the source vector length.
      // Check if the shuffle is some kind of concatenation of the input
      // vectors.
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        int Idx = Mask[i];
        if (Idx < 0)
          continue;
        // Ensure the indices in each SrcVT sized piece are sequential and that
        // the same source is used for the whole piece.
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
          IsConcat = false;
          break;
        }
        // Remember which source this index came from.
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
      }

      // The shuffle is concatenating multiple vectors together. Just emit
      // a CONCAT_VECTORS operation.
      if (IsConcat) {
        SmallVector<SDValue, 8> ConcatOps;
        for (auto Src : ConcatSrcs) {
          if (Src < 0)
            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
          else if (Src == 0)
            ConcatOps.push_back(Src1);
          else
            ConcatOps.push_back(Src2);
        }
        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
        return;
      }
    }

    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                    PaddedMaskNumElts);

    // Pad both vectors with undefs to make them the same length as the mask.
    SDValue UndefVal = DAG.getUNDEF(SrcVT);

    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
    MOps1[0] = Src1;
    MOps2[0] = Src2;

    Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
    Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);

    // Readjust mask for new input vector length.
    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      int Idx = Mask[i];
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;
      MappedOps[i] = Idx;
    }

    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);

    // If the concatenated vector was padded, extract a subvector with the
    // correct number of elements.
    if (MaskNumElts != PaddedMaskNumElts)
      Result = DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));

    setValue(&I, Result);
    return;
  }

  if (SrcNumElts > MaskNumElts) {
    // Analyze the access pattern of the vector to see if we can extract
    // two subvectors and do the shuffle.
    int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
    bool CanExtract = true;
    for (int Idx : Mask) {
      unsigned Input = 0;
      if (Idx < 0)
        continue;

      if (Idx >= (int)SrcNumElts) {
        Input = 1;
        Idx -= SrcNumElts;
      }

      // If all the indices come from the same MaskNumElts sized portion of
      // the sources we can use extract. Also make sure the extract wouldn't
      // extract past the end of the source.
      int NewStartIdx = alignDown(Idx, MaskNumElts);
      if (NewStartIdx + MaskNumElts > SrcNumElts ||
          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
        CanExtract = false;
      // Make sure we always update StartIdx as we use it to track if all
      // elements are undef.
      StartIdx[Input] = NewStartIdx;
    }

    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
      return;
    }
    if (CanExtract) {
      // Extract appropriate subvector and generate a vector shuffle
      for (unsigned Input = 0; Input < 2; ++Input) {
        SDValue &Src = Input == 0 ? Src1 : Src2;
        if (StartIdx[Input] < 0)
          Src = DAG.getUNDEF(VT);
        else
          Src = DAG.getNode(
              ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
              DAG.getConstant(StartIdx[Input], DL,
                              TLI.getVectorIdxTy(DAG.getDataLayout())));
      }

      // Calculate new mask.
      SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
      for (int &Idx : MappedOps) {
        if (Idx >= (int)SrcNumElts)
          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
        else if (Idx >= 0)
          Idx -= StartIdx[0];
      }

      setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
      return;
    }
  }

  // We can't use either concat vectors or extract subvectors so fall back to
  // replacing the shuffle with extract and build vector.
  EVT EltVT = VT.getVectorElementType();
  EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  SmallVector<SDValue, 8> Ops;
  for (int Idx : Mask) {
    SDValue Res;

    if (Idx < 0) {
      Res = DAG.getUNDEF(EltVT);
    } else {
      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;

      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                        EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
    }

    Ops.push_back(Res);
  }

  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
}
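// Worked example for the SrcNumElts < MaskNumElts path (a sketch, not from
// the source): with <2 x i32> sources,
//   shufflevector <2 x i32> %a, <2 x i32> %b,
//                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
// has every 2-element piece of the mask sequential and drawn from a single
// source (piece 0 from %a, piece 1 from %b), so IsConcat holds and the whole
// shuffle lowers to one CONCAT_VECTORS(%a, %b) node.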
void SelectionDAGBuilder::visitInsertValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
    Indices = IV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  Type *ValTy = Op1->getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumAggValues);

  // Ignore an insertvalue that produces an empty object
  if (!NumAggValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SDValue Agg = getValue(Op0);
  unsigned i = 0;
  // Copy the beginning value(s) from the original aggregate.
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);
  // Copy values from the inserted value(s).
  if (NumValValues) {
    SDValue Val = getValue(Op1);
    for (; i != LinearIndex + NumValValues; ++i)
      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
  }
  // Copy remaining value(s) from the original aggregate.
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(AggValueVTs), Values));
}
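// Worked example (hypothetical, for illustration only): for
//   %r = insertvalue {i32, i64, i32} %agg, i64 %v, 1
// ComputeLinearIndex maps aggregate index 1 to linear position 1, so
// Values[0] and Values[2] are copied out of %agg while Values[1] comes from
// %v, and the three parts are rejoined with a single MERGE_VALUES node.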
void SelectionDAGBuilder::visitExtractValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
    Indices = EV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  Type *AggTy = Op0->getType();
  Type *ValTy = I.getType();
  bool OutOfUndef = isa<UndefValue>(Op0);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumValValues = ValValueVTs.size();

  // Ignore an extractvalue that produces an empty object
  if (!NumValValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SmallVector<SDValue, 4> Values(NumValValues);

  SDValue Agg = getValue(Op0);
  // Copy out the selected value(s).
  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
    Values[i - LinearIndex] =
      OutOfUndef ?
        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
        SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValValueVTs), Values));
}
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
  Value *Op0 = I.getOperand(0);
  // Note that the pointer operand may be a vector of pointers. Take the scalar
  // element which holds a pointer.
  unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
  SDValue N = getValue(Op0);
  SDLoc dl = getCurSDLoc();
  auto &TLI = DAG.getTargetLoweringInfo();
  MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
  MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = I.getType()->isVectorTy() ?
    I.getType()->getVectorNumElements() : 0;

  if (VectorWidth && !N.getValueType().isVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
    N = DAG.getSplatBuildVector(VT, dl, N);
  }

  for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
                        DAG.getConstant(Offset, dl, N.getValueType()), Flags);
      }
    } else {
      unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
      MVT IdxTy = MVT::getIntegerVT(IdxSize);
      APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      const auto *C = dyn_cast<Constant>(Idx);
      if (C && isa<VectorType>(C->getType()))
        C = C->getSplatValue();

      if (const auto *CI = dyn_cast_or_null<ConstantInt>(C)) {
        if (CI->isZero())
          continue;
        APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
        LLVMContext &Context = *DAG.getContext();
        SDValue OffsVal = VectorWidth ?
          DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy,
                                                     VectorWidth)) :
          DAG.getConstant(Offs, dl, IdxTy);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
        continue;
      }

      // N = N + Idx * ElementSize;
      SDValue IdxN = getValue(Idx);

      if (!IdxN.getValueType().isVector() && VectorWidth) {
        EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
        IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
      }

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());

      // If this is a multiply by a power of two, turn it into a shl
      // immediately. This is a very common case.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          unsigned Amt = ElementSize.logBase2();
          IdxN = DAG.getNode(ISD::SHL, dl,
                             N.getValueType(), IdxN,
                             DAG.getConstant(Amt, dl, IdxN.getValueType()));
        } else {
          SDValue Scale = DAG.getConstant(ElementSize.getZExtValue(), dl,
                                          IdxN.getValueType());
          IdxN = DAG.getNode(ISD::MUL, dl,
                             N.getValueType(), IdxN, Scale);
        }
      }

      N = DAG.getNode(ISD::ADD, dl,
                      N.getValueType(), N, IdxN);
    }
  }

  if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
    N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);

  setValue(&I, N);
}
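// Worked example (a sketch, not from the source): on a 64-bit target,
//   %p = getelementptr i32, i32* %base, i64 %i
// has ElementSize 4, a power of two, so the index contribution becomes
//   N = add i64 %base, (shl i64 %i, 2)
// while a constant struct-field index contributes a plain add of the
// precomputed byte offset from the struct layout.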
void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  SDLoc dl = getCurSDLoc();
  Type *Ty = I.getAllocatedType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  unsigned Align =
      std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());

  SDValue AllocSize = getValue(I.getArraySize());

  EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
  if (AllocSize.getValueType() != IntPtr)
    AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);

  AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
                          AllocSize,
                          DAG.getConstant(TySize, dl, IntPtr));

  // Handle alignment. If the requested alignment is less than or equal to
  // the stack alignment, ignore it. If the size is greater than or equal to
  // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
      DAG.getSubtarget().getFrameLowering()->getStackAlignment();
  if (Align <= StackAlign)
    Align = 0;

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
                          DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);

  // Mask out the low bits for alignment purposes.
  AllocSize =
      DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
                  DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));

  SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
  setValue(&I, DSA);
  DAG.setRoot(DSA.getValue(1));

  assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
}
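// Worked example of the rounding math above (illustrative numbers only):
// with a 16-byte stack alignment, a 40-byte dynamic allocation becomes
//   (40 + 15) & ~15 == 48
// so DYNAMIC_STACKALLOC always carves out a stack-aligned amount of space.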
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
  if (I.isAtomic())
    return visitAtomicLoad(I);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitLoadFromSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return visitLoadFromSwiftError(I);
    }
  }

  SDValue Ptr = getValue(SV);

  Type *Ty = I.getType();

  bool isVolatile = I.isVolatile();
  bool isNonTemporal = I.hasMetadata(LLVMContext::MD_nontemporal);
  bool isInvariant = I.hasMetadata(LLVMContext::MD_invariant_load);
  bool isDereferenceable =
      isDereferenceablePointer(SV, I.getType(), DAG.getDataLayout());
  unsigned Alignment = I.getAlignment();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  SDValue Root;
  bool ConstantMemory = false;
  if (isVolatile)
    // Serialize volatile loads with other side effects.
    Root = getRoot();
  else if (NumValues > MaxParallelChains)
    Root = getMemoryRoot();
  else if (AA &&
           AA->pointsToConstantMemory(MemoryLocation(
               SV,
               LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
               AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  SDLoc dl = getCurSDLoc();

  if (isVolatile)
    Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  EVT PtrVT = Ptr.getValueType();
  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // Serializing loads here may result in excessive register pressure, and
    // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
    // could recover a bit by hoisting nodes upward in the chain by recognizing
    // they are side-effect free or do not alias. The optimizer should really
    // avoid this case by converting large object/array copies to llvm.memcpy
    // (MaxParallelChains should always remain as failsafe).
    if (ChainI == MaxParallelChains) {
      assert(PendingLoads.empty() && "PendingLoads must be serialized first");
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    SDValue A = DAG.getNode(ISD::ADD, dl,
                            PtrVT, Ptr,
                            DAG.getConstant(Offsets[i], dl, PtrVT),
                            Flags);
    auto MMOFlags = MachineMemOperand::MONone;
    if (isVolatile)
      MMOFlags |= MachineMemOperand::MOVolatile;
    if (isNonTemporal)
      MMOFlags |= MachineMemOperand::MONonTemporal;
    if (isInvariant)
      MMOFlags |= MachineMemOperand::MOInvariant;
    if (isDereferenceable)
      MMOFlags |= MachineMemOperand::MODereferenceable;
    MMOFlags |= TLI.getMMOFlags(I);

    SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
                            MachinePointerInfo(SV, Offsets[i]), Alignment,
                            MMOFlags, AAInfo, Ranges);
    Chains[ChainI] = L.getValue(1);

    if (MemVTs[i] != ValueVTs[i])
      L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);

    Values[i] = L;
  }

  if (!ConstantMemory) {
    SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                makeArrayRef(Chains.data(), ChainI));
    if (isVolatile)
      DAG.setRoot(Chain);
    else
      PendingLoads.push_back(Chain);
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
                           DAG.getVTList(ValueVTs), Values));
}
void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitStoreToSwiftError when backend supports swifterror");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  const Value *SrcV = I.getOperand(0);
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  SDValue Src = getValue(SrcV);
  // Create a virtual register, then update the virtual register.
  Register VReg =
      SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
  // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
  // Chain can be getRoot or getControlRoot.
  SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
                                      SDValue(Src.getNode(), Src.getResNo()));
  DAG.setRoot(CopyNode);
}
void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitLoadFromSwiftError when backend supports swifterror");

  assert(!I.isVolatile() &&
         !I.hasMetadata(LLVMContext::MD_nontemporal) &&
         !I.hasMetadata(LLVMContext::MD_invariant_load) &&
         "Support volatile, non temporal, invariant for load_from_swift_error");

  const Value *SV = I.getOperand(0);
  Type *Ty = I.getType();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  assert(
      (!AA ||
       !AA->pointsToConstantMemory(MemoryLocation(
           SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
           AAInfo))) &&
      "load_from_swift_error should not be constant memory");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
                  ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
  SDValue L = DAG.getCopyFromReg(
      getRoot(), getCurSDLoc(),
      SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);

  setValue(&I, L);
}
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
  if (I.isAtomic())
    return visitAtomicStore(I);

  const Value *SrcV = I.getOperand(0);
  const Value *PtrV = I.getOperand(1);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitStoreToSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return visitStoreToSwiftError(I);
    }
  }

  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  // Get the lowered operands. Note that we do this after
  // checking if NumResults is zero, because with zero results
  // the operands won't have values in the map.
  SDValue Src = getValue(SrcV);
  SDValue Ptr = getValue(PtrV);

  SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  SDLoc dl = getCurSDLoc();
  unsigned Alignment = I.getAlignment();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  auto MMOFlags = MachineMemOperand::MONone;
  if (I.isVolatile())
    MMOFlags |= MachineMemOperand::MOVolatile;
  if (I.hasMetadata(LLVMContext::MD_nontemporal))
    MMOFlags |= MachineMemOperand::MONonTemporal;
  MMOFlags |= TLI.getMMOFlags(I);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // See visitLoad comments.
    if (ChainI == MaxParallelChains) {
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
    SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
    if (MemVTs[i] != ValueVTs[i])
      Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
    SDValue St =
        DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
                     Alignment, MMOFlags, AAInfo);
    Chains[ChainI] = St;
  }

  SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
  DAG.setRoot(StoreNode);
}
void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
                                           bool IsCompressing) {
  SDLoc sdl = getCurSDLoc();

  auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                               unsigned &Alignment) {
    // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Mask = I.getArgOperand(3);
  };
  auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                                    unsigned &Alignment) {
    // llvm.masked.compressstore.*(Src0, Ptr, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Mask = I.getArgOperand(2);
    Alignment = 0;
  };

  Value *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsCompressing)
    getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);
  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(PtrOperand),
                         MachineMemOperand::MOStore,
                         // TODO: Make MachineMemOperands aware of scalable
                         // vectors.
                         VT.getStoreSize().getKnownMinSize(),
                         Alignment, AAInfo);
  SDValue StoreNode =
      DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
                         ISD::UNINDEXED, false /* Truncating */, IsCompressing);
  DAG.setRoot(StoreNode);
  setValue(&I, StoreNode);
}
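// Example of the intrinsic shapes handled above (hypothetical IR, for
// illustration):
//   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %p,
//                                              i32 4, <4 x i1> %m)
// stores only the lanes of %v whose bit in %m is set; the compressing form
// instead packs the selected lanes contiguously starting at %p, which is why
// it carries no explicit alignment argument.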
// Get a uniform base for the Gather/Scatter intrinsic.
// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
// We try to represent it as a base pointer + vector of indices.
// Usually, the vector of pointers comes from a 'getelementptr' instruction.
// The first operand of the GEP may be a single pointer or a vector of
// pointers. Example:
//   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
//  or
//   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
//   %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
//
// When the first GEP operand is a single pointer - it is the uniform base we
// are looking for. If first operand of the GEP is a splat vector - we
// extract the splat value and use it as a uniform base.
// In all other cases the function returns 'false'.
static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index,
                           ISD::MemIndexType &IndexType, SDValue &Scale,
                           SelectionDAGBuilder *SDB) {
  SelectionDAG& DAG = SDB->DAG;
  LLVMContext &Context = *DAG.getContext();

  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
  const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return false;

  const Value *GEPPtr = GEP->getPointerOperand();
  if (!GEPPtr->getType()->isVectorTy())
    Ptr = GEPPtr;
  else if (!(Ptr = getSplatValue(GEPPtr)))
    return false;

  unsigned FinalIndex = GEP->getNumOperands() - 1;
  Value *IndexVal = GEP->getOperand(FinalIndex);
  gep_type_iterator GTI = gep_type_begin(*GEP);

  // Ensure all the other indices are 0.
  for (unsigned i = 1; i < FinalIndex; ++i, ++GTI) {
    auto *C = dyn_cast<Constant>(GEP->getOperand(i));
    if (!C)
      return false;
    if (isa<VectorType>(C->getType()))
      C = C->getSplatValue();
    auto *CI = dyn_cast_or_null<ConstantInt>(C);
    if (!CI || !CI->isZero())
      return false;
  }

  // The operands of the GEP may be defined in another basic block.
  // In this case we'll not find nodes for the operands.
  if (!SDB->findValue(Ptr))
    return false;
  Constant *C = dyn_cast<Constant>(IndexVal);
  if (!C && !SDB->findValue(IndexVal))
    return false;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  StructType *STy = GTI.getStructTypeOrNull();

  if (STy) {
    const StructLayout *SL = DL.getStructLayout(STy);
    if (isa<VectorType>(C->getType())) {
      C = C->getSplatValue();
      // FIXME: Can getSplatValue return nullptr for a structure?
      // If not, the following check can be removed.
      if (!C)
        return false;
    }
    auto *CI = cast<ConstantInt>(C);
    Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
    Index = DAG.getConstant(SL->getElementOffset(CI->getZExtValue()),
                            SDB->getCurSDLoc(), TLI.getPointerTy(DL));
  } else {
    Scale = DAG.getTargetConstant(
                DL.getTypeAllocSize(GEP->getResultElementType()),
                SDB->getCurSDLoc(), TLI.getPointerTy(DL));
    Index = SDB->getValue(IndexVal);
  }
  Base = SDB->getValue(Ptr);
  IndexType = ISD::SIGNED_SCALED;

  if (STy || !Index.getValueType().isVector()) {
    unsigned GEPWidth = GEP->getType()->getVectorNumElements();
    EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
    Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
  }
  return true;
}
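// Worked example (hedged, using the scalar-base form from the comment above):
// for
//   %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
// the returned Base is %ptr, Index is %ind, and Scale is 4 (the allocation
// size of i32), so each lane's effective address is %ptr + 4 * %ind[lane].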
void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
  SDLoc sdl = getCurSDLoc();

  // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
  const Value *Ptr = I.getArgOperand(1);
  SDValue Src0 = getValue(I.getArgOperand(0));
  SDValue Mask = getValue(I.getArgOperand(3));
  EVT VT = Src0.getValueType();
  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  SDValue Base;
  SDValue Index;
  ISD::MemIndexType IndexType;
  SDValue Scale;
  const Value *BasePtr = Ptr;
  bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
                                    this);

  const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
  MachineMemOperand *MMO = DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
                         MachineMemOperand::MOStore,
                         // TODO: Make MachineMemOperands aware of scalable
                         // vectors.
                         VT.getStoreSize().getKnownMinSize(),
                         Alignment, AAInfo);
  if (!UniformBase) {
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    IndexType = ISD::SIGNED_SCALED;
    Scale = DAG.getTargetConstant(1, sdl,
                                  TLI.getPointerTy(DAG.getDataLayout()));
  }
  SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
  SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
                                         Ops, MMO, IndexType);
  DAG.setRoot(Scatter);
  setValue(&I, Scatter);
}
void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
  SDLoc sdl = getCurSDLoc();

  auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                              unsigned &Alignment) {
    // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
    Mask = I.getArgOperand(2);
    Src0 = I.getArgOperand(3);
  };
  auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                                 unsigned &Alignment) {
    // @llvm.masked.expandload.*(Ptr, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = 0;
    Mask = I.getArgOperand(1);
    Src0 = I.getArgOperand(2);
  };

  Value *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsExpanding)
    getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);
  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  // Do not serialize masked loads of constant memory with anything.
  MemoryLocation ML;
  if (VT.isScalableVector())
    ML = MemoryLocation(PtrOperand);
  else
    ML = MemoryLocation(PtrOperand, LocationSize::precise(
                            DAG.getDataLayout().getTypeStoreSize(I.getType())),
                        AAInfo);
  bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);

  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(PtrOperand),
                         MachineMemOperand::MOLoad,
                         // TODO: Make MachineMemOperands aware of scalable
                         // vectors.
                         VT.getStoreSize().getKnownMinSize(),
                         Alignment, AAInfo, Ranges);

  SDValue Load =
      DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
                        ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
  if (AddToChain)
    PendingLoads.push_back(Load.getValue(1));
  setValue(&I, Load);
}
void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
  SDLoc sdl = getCurSDLoc();

  // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
  const Value *Ptr = I.getArgOperand(0);
  SDValue Src0 = getValue(I.getArgOperand(3));
  SDValue Mask = getValue(I.getArgOperand(2));

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SDValue Root = DAG.getRoot();
  SDValue Base;
  SDValue Index;
  ISD::MemIndexType IndexType;
  SDValue Scale;
  const Value *BasePtr = Ptr;
  bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
                                    this);
  bool ConstantMemory = false;
  if (UniformBase && AA &&
      AA->pointsToConstantMemory(
          MemoryLocation(BasePtr,
                         LocationSize::precise(
                             DAG.getDataLayout().getTypeStoreSize(I.getType())),
                         AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  }

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
                         MachineMemOperand::MOLoad,
                         // TODO: Make MachineMemOperands aware of scalable
                         // vectors.
                         VT.getStoreSize().getKnownMinSize(),
                         Alignment, AAInfo, Ranges);

  if (!UniformBase) {
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    IndexType = ISD::SIGNED_SCALED;
    Scale = DAG.getTargetConstant(1, sdl,
                                  TLI.getPointerTy(DAG.getDataLayout()));
  }
  SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
  SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
                                       Ops, MMO, IndexType);

  SDValue OutChain = Gather.getValue(1);
  if (!ConstantMemory)
    PendingLoads.push_back(OutChain);
  setValue(&I, Gather);
}
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
  AtomicOrdering FailureOrdering = I.getFailureOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
  SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);

  auto Alignment = DAG.getEVTAlignment(MemVT);

  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                            Flags, MemVT.getStoreSize(), Alignment,
                            AAMDNodes(), nullptr, SSID, SuccessOrdering,
                            FailureOrdering);

  SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
                                   dl, MemVT, VTs, InChain,
                                   getValue(I.getPointerOperand()),
                                   getValue(I.getCompareOperand()),
                                   getValue(I.getNewValOperand()), MMO);

  SDValue OutChain = L.getValue(2);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}
void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
  SDLoc dl = getCurSDLoc();
  ISD::NodeType NT;
  switch (I.getOperation()) {
  default: llvm_unreachable("Unknown atomicrmw operation");
  case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
  case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
  case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
  case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
  case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
  case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
  case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
  case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
  case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
  case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
  case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
  case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
  case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
  }
  AtomicOrdering Ordering = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
  auto Alignment = DAG.getEVTAlignment(MemVT);

  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
                            MemVT.getStoreSize(), Alignment, AAMDNodes(),
                            nullptr, SSID, Ordering);

  SDValue L =
    DAG.getAtomic(NT, dl, MemVT, InChain,
                  getValue(I.getPointerOperand()), getValue(I.getValOperand()),
                  MMO);

  SDValue OutChain = L.getValue(1);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}
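// Example mapping (illustrative, not from the original source):
//   %old = atomicrmw add i32* %p, i32 1 seq_cst
// selects NT = ISD::ATOMIC_LOAD_ADD above; the resulting node carries the
// previous memory value as result 0 (bound via setValue) and the chain as
// result 1 (threaded back into the DAG root through OutChain).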

void SelectionDAGBuilder::visitFence(const FenceInst &I) {
  SDLoc dl = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Ops[3];
  Ops[0] = getRoot();
  Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
                                 TLI.getFenceOperandTy(DAG.getDataLayout()));
  Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
                                 TLI.getFenceOperandTy(DAG.getDataLayout()));
  DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
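
// For example, a plain `fence seq_cst` becomes an ATOMIC_FENCE node whose
// three operands are the chain, the AtomicOrdering encoded as a target
// constant, and the sync scope ID (the default system scope here).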

void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());

  if (!TLI.supportsUnalignedAtomics() &&
      I.getAlignment() < MemVT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic load");

  auto Flags = MachineMemOperand::MOLoad;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  if (I.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;
  if (isDereferenceablePointer(I.getPointerOperand(), I.getType(),
                               DAG.getDataLayout()))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= TLI.getMMOFlags(I);

  MachineMemOperand *MMO =
      DAG.getMachineFunction().
          getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                               Flags, MemVT.getStoreSize(),
                               I.getAlignment() ? I.getAlignment() :
                                                  DAG.getEVTAlignment(MemVT),
                               AAMDNodes(), nullptr, SSID, Order);

  InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);

  SDValue Ptr = getValue(I.getPointerOperand());

  if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
    // TODO: Once this is better exercised by tests, it should be merged with
    // the normal path for loads to prevent future divergence.
    SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
    if (MemVT != VT)
      L = DAG.getPtrExtOrTrunc(L, dl, VT);

    setValue(&I, L);
    SDValue OutChain = L.getValue(1);
    if (!I.isUnordered())
      DAG.setRoot(OutChain);
    else
      PendingLoads.push_back(OutChain);
    return;
  }

  SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
                            Ptr, MMO);

  SDValue OutChain = L.getValue(1);
  if (MemVT != VT)
    L = DAG.getPtrExtOrTrunc(L, dl, VT);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
  SDLoc dl = getCurSDLoc();

  AtomicOrdering Ordering = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());

  if (I.getAlignment() < MemVT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic store");

  auto Flags = MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= TLI.getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
                            MemVT.getStoreSize(), I.getAlignment(), AAMDNodes(),
                            nullptr, SSID, Ordering);

  SDValue Val = getValue(I.getValueOperand());
  if (Val.getValueType() != MemVT)
    Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
  SDValue Ptr = getValue(I.getPointerOperand());

  if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
    // TODO: Once this is better exercised by tests, it should be merged with
    // the normal path for stores to prevent future divergence.
    SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
    DAG.setRoot(S);
    return;
  }

  SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
                                   Ptr, Val, MMO);

  DAG.setRoot(OutChain);
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
                                               unsigned Intrinsic) {
  // Ignore the callsite's attributes. A specific call site may be marked with
  // readnone, but the lowering code will expect the chain based on the
  // definition.
  const Function *F = I.getCalledFunction();
  bool HasChain = !F->doesNotAccessMemory();
  bool OnlyLoad = HasChain && F->onlyReadsMemory();

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Info is set by getTgtMemIntrinsic
  TargetLowering::IntrinsicInfo Info;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
                                               DAG.getMachineFunction(),
                                               Intrinsic);

  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
  if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
      Info.opc == ISD::INTRINSIC_W_CHAIN)
    Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
                                        TLI.getPointerTy(DAG.getDataLayout())));

  // Add all operands of the call to the operand list.
  for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
    const Value *Arg = I.getArgOperand(i);
    if (!I.paramHasAttr(i, Attribute::ImmArg)) {
      Ops.push_back(getValue(Arg));
      continue;
    }

    // Use TargetConstant instead of a regular constant for immarg.
    EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
    } else {
      Ops.push_back(
          DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
    }
  }

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);

  if (HasChain)
    ValueVTs.push_back(MVT::Other);

  SDVTList VTs = DAG.getVTList(ValueVTs);

  // Create the node.
  SDValue Result;
  if (IsTgtIntrinsic) {
    // This is target intrinsic that touches memory
    AAMDNodes AAInfo;
    I.getAAMetadata(AAInfo);
    Result = DAG.getMemIntrinsicNode(
        Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
        MachinePointerInfo(Info.ptrVal, Info.offset),
        Info.align ? Info.align->value() : 0, Info.flags, Info.size, AAInfo);
  } else if (!HasChain) {
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
  } else if (!I.getType()->isVoidTy()) {
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
  } else {
    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
  }

  if (HasChain) {
    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }

  if (!I.getType()->isVoidTy()) {
    if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
      EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
      Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
    } else
      Result = lowerRangeToAssertZExt(DAG, I, Result);

    setValue(&I, Result);
  }
}

/// GetSignificand - Get the significand and build it into a floating-point
/// number with exponent of 1:
///
///   Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x007fffff, dl, MVT::i32));
  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
                           DAG.getConstant(0x3f800000, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}
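
// Worked example: for Op = 0x40490fdb (3.14159274f, i.e. pi),
//   (0x40490fdb & 0x007fffff) | 0x3f800000 = 0x3fc90fdb = 1.5707964f,
// which is pi's significand rescaled into [1.0, 2.0).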

/// GetExponent - Get the exponent:
///
///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
                           const TargetLowering &TLI, const SDLoc &dl) {
  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x7f800000, dl, MVT::i32));
  SDValue t1 = DAG.getNode(
      ISD::SRL, dl, MVT::i32, t0,
      DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
                           DAG.getConstant(127, dl, MVT::i32));
  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
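
// Worked example: for Op = 0x40490fdb (3.14159274f),
//   ((0x40490fdb & 0x7f800000) >> 23) - 127 = 128 - 127 = 1,
// so GetExponent returns 1.0f: pi = 1.5707964f * 2^1.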

/// getF32Constant - Get 32-bit floating point constant.
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
                              const SDLoc &dl) {
  return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
                           MVT::f32);
}
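
// The hex arguments used throughout the expansions below are raw IEEE-754
// single-precision bit patterns for the minimax polynomial coefficients.
// For example, getF32Constant(DAG, 0x3e814304, dl) materializes 0.252464424f,
// and 0x3f800000 is exactly 1.0f.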

static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
                                       SelectionDAG &DAG) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  //   IntegerPartOfX = (int32_t)t0;
  SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

  //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
  SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
  SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

  //   IntegerPartOfX <<= 23;
  IntegerPartOfX = DAG.getNode(
      ISD::SHL, dl, MVT::i32, IntegerPartOfX,
      DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
                                  DAG.getDataLayout())));

  SDValue TwoToFractionalPartOfX;
  if (LimitFloatPrecision <= 6) {
    // For floating-point precision of 6:
    //
    //   TwoToFractionalPartOfX =
    //     0.997535578f +
    //       (0.735607626f + 0.252464424f * x) * x;
    //
    // error 0.0144103317, which is 6 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3e814304, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3f3c50c8, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                                         getF32Constant(DAG, 0x3f7f5e7e, dl));
  } else if (LimitFloatPrecision <= 12) {
    // For floating-point precision of 12:
    //
    //   TwoToFractionalPartOfX =
    //     0.999892986f +
    //       (0.696457318f +
    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
    //
    // error 0.000107046256, which is 13 to 14 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3da235e3, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3e65b8f3, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3f324b07, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                                         getF32Constant(DAG, 0x3f7ff8fd, dl));
  } else { // LimitFloatPrecision <= 18
    // For floating-point precision of 18:
    //
    //   TwoToFractionalPartOfX =
    //     0.999999982f +
    //       (0.693148872f +
    //         (0.240227044f +
    //           (0.554906021e-1f +
    //             (0.961591928e-2f +
    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
    // error 2.47208000*10^(-7), which is better than 18 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3924b03e, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3ab24b87, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3c1d8c17, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                             getF32Constant(DAG, 0x3d634a1d, dl));
    SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
    SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                             getF32Constant(DAG, 0x3e75fe14, dl));
    SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
    SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                              getF32Constant(DAG, 0x3f317234, dl));
    SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                         getF32Constant(DAG, 0x3f800000, dl));
  }

  // Add the exponent into the result in integer domain.
  SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}
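
// The final BITCAST/ADD pair exploits the IEEE-754 layout: adding
// (IntegerPartOfX << 23) to the bits of a float in [1.0, 2.0) bumps the
// exponent field directly, multiplying by 2^IntegerPartOfX with no FP ops.
// Worked example: bitcast(1.5f) = 0x3fc00000; adding (2 << 23) = 0x01000000
// gives 0x40c00000 = 6.0f = 1.5f * 2^2.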

/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {

    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   t0 = Op * log2(e)

    // TODO: What fast-math-flags should be set here?
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
                             DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
}
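
// The expansion rests on the identity exp(x) = 2^(x * log2(e)), with
// numbers::log2ef ~= 1.44269504f; getLimitedPrecisionExp2 then evaluates the
// resulting power of two as above.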

/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log(2).
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent =
        DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                    DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    SDValue LogOfMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   LogOfMantissa =
      //     -1.1609546f +
      //       (1.4034025f - 0.23903021f * x) * x;
      //
      // error 0.0034276066, which is better than 8 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbe74c456, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3fb3a2b1, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                  getF32Constant(DAG, 0x3f949a29, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   LogOfMantissa =
      //     -1.7417939f +
      //       (2.8212026f +
      //         (-1.4699568f +
      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
      //
      // error 0.000061011436, which is 14 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbd67b6d6, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ee4f4b8, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fbc278b, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40348e95, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                  getF32Constant(DAG, 0x3fdef31a, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   LogOfMantissa =
      //     -2.1072184f +
      //       (4.2372794f +
      //         (-3.7029485f +
      //           (2.2781945f +
      //             (-0.87823314f +
      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
      //
      // error 0.0000023660568, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbc91e5ac, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e4350aa, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f60d3e3, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x4011cdf0, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x406cfd1c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x408797cb, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                  getF32Constant(DAG, 0x4006dcab, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
}
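
// The special expansion uses ln(x) = exponent * ln(2) + ln(significand).
// Worked example: for x = 8.0f the exponent is 3 and the significand is 1.0f,
// so the result is 3 * 0.69314718f + ln(1.0f) ~= 2.0794415f = ln(8).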

/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Get the exponent.
    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Different possible minimax approximations of significand in
    // floating-point for various degrees of accuracy over [1,2].
    SDValue Log2ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
      //
      // error 0.0049451742, which is more than 7 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbeb08fe0, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x40019463, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                   getF32Constant(DAG, 0x3fd6633d, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
      //
      // error 0.0000876136000, which is better than 13 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbda7262e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f25280b, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x4007b923, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40823e2f, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                   getF32Constant(DAG, 0x4020d29c, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
      //
      // error 0.0000018516, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbcd2769e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e8ce0b9, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fa22ae7, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40525723, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x40aaf200, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x40c39dad, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                   getF32Constant(DAG, 0x4042902c, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
}
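
// Here the identity is log2(x) = exponent + log2(significand). Worked
// example: for x = 6.0f the exponent is 2 and the significand is 1.5f, so
// the result is 2 + log2(1.5f) ~= 2 + 0.5849625f = 2.5849625f.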

/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                           const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    SDValue Log10ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
      //
      // error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                    getF32Constant(DAG, 0x3f011300, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      // error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                    getF32Constant(DAG, 0x3f25f7c3, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      // error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                    getF32Constant(DAG, 0x3f57ce70, dl));
    }

    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}
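
// Here the identity is log10(x) = exponent * log10(2) + log10(significand);
// 0x3e9a209a is the bit pattern of log10(2) ~= 0.30103001f. Worked example:
// for x = 100.0f the exponent is 6 and the significand is 1.5625f, giving
// 6 * 0.30103f + log10(1.5625f) ~= 1.80618f + 0.19382f = 2.0f.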

/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
    return getLimitedPrecisionExp2(Op, dl, DAG);

  // No special expansion.
  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
}

/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
                         SelectionDAG &DAG, const TargetLowering &TLI) {
  bool IsExp10 = false;
  if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
      APFloat Ten(10.0f);
      IsExp10 = LHSC->isExactlyValue(Ten);
    }
  }

  // TODO: What fast-math-flags should be set on the FMUL node?
  if (IsExp10) {
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OF10 3.3219281f
    //   t0 = Op * LOG2OF10;
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
                             getF32Constant(DAG, 0x40549a78, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
}
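
// The 10.0f special case uses pow(10, x) = 2^(x * log2(10));
// 0x40549a78 is the bit pattern of log2(10) ~= 3.3219281f.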

/// ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
                          SelectionDAG &DAG) {
  // If RHS is a constant, we can expand this out to a multiplication tree,
  // otherwise we end up lowering to a call to __powidf2 (for example). When
  // optimizing for size, we only want to do this if the expansion would produce
  // a small number of multiplies, otherwise we do the full expansion.
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
    // Get the exponent as a positive value.
    unsigned Val = RHSC->getSExtValue();
    if ((int)Val < 0) Val = -Val;

    // powi(x, 0) -> 1.0
    if (Val == 0)
      return DAG.getConstantFP(1.0, DL, LHS.getValueType());

    bool OptForSize = DAG.shouldOptForSize();
    if (!OptForSize ||
        // If optimizing for size, don't insert too many multiplies.
        // This inserts up to 5 multiplies.
        countPopulation(Val) + Log2_32(Val) < 7) {
      // We use the simple binary decomposition method to generate the multiply
      // sequence. There are more optimal ways to do this (for example,
      // powi(x,15) generates one more multiply than it should), but this has
      // the benefit of being both really simple and much better than a libcall.
      SDValue Res;       // Logically starts equal to 1.0
      SDValue CurSquare = LHS;
      // TODO: Intrinsics should have fast-math-flags that propagate to these
      // nodes.
      while (Val) {
        if (Val & 1) {
          if (Res.getNode())
            Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res,
                              CurSquare);
          else
            Res = CurSquare;  // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }

      // If the original was negative, invert the result, producing 1/(x*x*x).
      if (RHSC->getSExtValue() < 0)
        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
      return Res;
    }
  }

  // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}
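
// Worked example of the binary decomposition: for powi(x, 13), Val = 0b1101,
// so Res accumulates x, then x * x^4 = x^5, then x^5 * x^8 = x^13, while
// CurSquare steps through x, x^2, x^4, x^8. The size heuristic admits this
// even when optimizing for size: countPopulation(13) + Log2_32(13) = 3 + 3 =
// 6 < 7.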

static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
                            SDValue LHS, SDValue RHS, SDValue Scale,
                            SelectionDAG &DAG, const TargetLowering &TLI) {
  EVT VT = LHS.getValueType();
  bool Signed = Opcode == ISD::SDIVFIX;
  LLVMContext &Ctx = *DAG.getContext();

  // If the type is legal but the operation isn't, this node might survive all
  // the way to operation legalization. If we end up there and we do not have
  // the ability to widen the type (if VT*2 is not legal), we cannot expand the
  // node.
  //
  // Coax the legalizer into expanding the node during type legalization instead
  // by bumping the size by one bit. This will force it to Promote, enabling the
  // early expansion and avoiding the need to expand later.
  //
  // We don't have to do this if Scale is 0; that can always be expanded.
  //
  // FIXME: We wouldn't have to do this (or any of the early
  // expansion/promotion) if it was possible to expand a libcall of an
  // illegal type during operation legalization. But it's not, so things
  // get a bit hacky.
  unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
  if (ScaleInt > 0 &&
      (TLI.isTypeLegal(VT) ||
       (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
    TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
        Opcode, VT, ScaleInt);
    if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
      EVT PromVT;
      if (VT.isScalarInteger())
        PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
      else if (VT.isVector()) {
        PromVT = VT.getVectorElementType();
        PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
        PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
      } else
        llvm_unreachable("Wrong VT for DIVFIX?");
      if (Signed) {
        LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
      } else {
        LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
      }
      // TODO: Saturation.
      SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
      return DAG.getZExtOrTrunc(Res, DL, VT);
    }
  }

  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
}
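
// For example, for an i32 SDIVFIX with a nonzero scale on a target where i32
// is legal but the operation is not, the node is rebuilt with an i33 result:
// i33 is an illegal type, so type legalization promotes it and expands the
// fixed-point division early, before operation legalization can get stuck.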

// getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
static void
getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                     const SDValue &N) {
  switch (N.getOpcode()) {
  case ISD::CopyFromReg: {
    SDValue Op = N.getOperand(1);
    Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
                      Op.getValueType().getSizeInBits());
    return;
  }
  case ISD::BITCAST:
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::TRUNCATE:
    getUnderlyingArgRegs(Regs, N.getOperand(0));
    return;
  case ISD::BUILD_PAIR:
  case ISD::BUILD_VECTOR:
  case ISD::CONCAT_VECTORS:
    for (SDValue Op : N->op_values())
      getUnderlyingArgRegs(Regs, Op);
    return;
  default:
    return;
  }
}
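
// For example, an i64 argument passed in two 32-bit registers typically
// reaches the dbg.value as BUILD_PAIR(CopyFromReg %r0, CopyFromReg %r1);
// this walk returns {(%r0, 32), (%r1, 32)} (register names illustrative).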

/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted to the entry BB.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
    DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  if (!IsDbgDeclare) {
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;

    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that also is a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue. This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method. ArgDbgValues are hoisted to the beginning of the entry block. So
    // we should only emit as ArgDbgValue if the Variable is an argument to the
    // current function, and the dbg.value intrinsic is found in the entry
    // block.
    bool VariableIsFunctionInputArg = Variable->isParameter() &&
                                      !DL->getInlinedAt();
    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
    if (!IsInPrologue && !VariableIsFunctionInputArg)
      return false;

    // Here we assume that a function argument on IR level only can be used to
    // describe one input parameter on source level. If we for example have
    // source code like this
    //
    //    struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //      b = a.x;
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b) {
    //  entry:
    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
    // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we have already used %a1 to describe a
    // parameter we should not handle that last dbg.value here (that would
    // result in an incorrect hoisting of the DBG_VALUE to the function entry).
    //
    // Notice that we allow one dbg.value per IR level argument, to accommodate
    // for the situation with fragments above.
    if (VariableIsFunctionInputArg) {
      unsigned ArgNo = Arg->getArgNo();
      if (ArgNo >= FuncInfo.DescribedArgs.size())
        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return false;
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

  Optional<MachineOperand> Op;
  // Some arguments' frame index is recorded during argument lowering.
  int FI = FuncInfo.getArgumentFrameIndex(Arg);
  if (FI != std::numeric_limits<int>::max())
    Op = MachineOperand::CreateFI(FI);

  SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
  if (!Op && N.getNode()) {
    getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    if (ArgRegsAndSizes.size() == 1)
      Reg = ArgRegsAndSizes.front().first;

    if (Reg && Reg.isVirtual()) {
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      Op = MachineOperand::CreateReg(Reg, false);
    }
  }

  if (!Op && N.getNode()) {
    // Check if frame index is available.
    SDValue LCandidate = peekThroughBitcasts(N);
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
      if (FrameIndexSDNode *FINode =
              dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
    auto splitMultiRegDbgValue
      = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
      unsigned Offset = 0;
      for (auto RegAndSize : SplitRegs) {
        // If the expression is already a fragment, the current register
        // offset+size might extend beyond the fragment. In this case, only
        // the register bits that are inside the fragment are relevant.
        int RegFragmentSizeInBits = RegAndSize.second;
        if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
          uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
          // The register is entirely outside the expression fragment,
          // so is irrelevant for debug info.
          if (Offset >= ExprFragmentSizeInBits)
            break;
          // The register is partially outside the expression fragment, only
          // the low bits within the fragment are relevant for debug info.
          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
          }
        }

        auto FragmentExpr = DIExpression::createFragmentExpression(
            Expr, Offset, RegFragmentSizeInBits);
        Offset += RegAndSize.second;
        // If a valid fragment expression cannot be created, the variable's
        // correct value cannot be determined and so it is set as Undef.
        if (!FragmentExpr) {
          SDDbgValue *SDV = DAG.getConstantDbgValue(
              Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          continue;
        }
        assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
        FuncInfo.ArgDbgValues.push_back(
          BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), false,
                  RegAndSize.first, Variable, *FragmentExpr));
      }
    };

    // Check if ValueMap has reg number.
    DenseMap<const Value *, unsigned>::const_iterator
      VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      const auto &TLI = DAG.getTargetLoweringInfo();
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
                       V->getType(), getABIRegCopyCC(V));
      if (RFV.occupiesMultipleRegs()) {
        splitMultiRegDbgValue(RFV.getRegsAndSizes());
        return true;
      }

      Op = MachineOperand::CreateReg(VMI->second, false);
    } else if (ArgRegsAndSizes.size() > 1) {
      // This was split due to the calling convention, and no virtual register
      // mapping exists for the value.
      splitMultiRegDbgValue(ArgRegsAndSizes);
      return true;
    }
  }

  if (!Op)
    return false;

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");

  // If the argument arrives in a stack slot, then what the IR thought was a
  // normal Value is actually in memory, and we must add a deref to load it.
  if (Op->isFI()) {
    int FI = Op->getIndex();
    unsigned Size = DAG.getMachineFunction().getFrameInfo().getObjectSize(FI);
    if (Expr->isImplicit()) {
      SmallVector<uint64_t, 2> Ops = {dwarf::DW_OP_deref_size, Size};
      Expr = DIExpression::prependOpcodes(Expr, Ops);
    } else {
      Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }
  }

  // If this location was specified with a dbg.declare, then it and its
  // expression calculate the address of the variable. Append a deref to
  // force it to be a memory location.
  if (IsDbgDeclare)
    Expr = DIExpression::append(Expr, {dwarf::DW_OP_deref});

  FuncInfo.ArgDbgValues.push_back(
      BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), false,
              *Op, Variable, Expr));

  return true;
}
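
// For example, if an i64 argument is split across two 32-bit registers and
// has no single virtual-register mapping, splitMultiRegDbgValue emits two
// DBG_VALUEs whose expressions carry DW_OP_LLVM_fragment (offset 0, size 32)
// and (offset 32, size 32) respectively.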

/// Return the appropriate SDDbgValue based on N.
SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
                                             DILocalVariable *Variable,
                                             DIExpression *Expr,
                                             const DebugLoc &dl,
                                             unsigned DbgSDNodeOrder) {
  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
    // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
    // stack slot locations.
    //
    // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
    // debug values here after optimization:
    //
    //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
    //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
    //
    // Both describe the direct values of their associated variables.
    return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
                                     /*IsIndirect*/ false, dl, DbgSDNodeOrder);
  }
  return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
                         /*IsIndirect*/ false, dl, DbgSDNodeOrder);
}

static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::smul_fix:
    return ISD::SMULFIX;
  case Intrinsic::umul_fix:
    return ISD::UMULFIX;
  case Intrinsic::smul_fix_sat:
    return ISD::SMULFIXSAT;
  case Intrinsic::umul_fix_sat:
    return ISD::UMULFIXSAT;
  case Intrinsic::sdiv_fix:
    return ISD::SDIVFIX;
  case Intrinsic::udiv_fix:
    return ISD::UDIVFIX;
  default:
    llvm_unreachable("Unhandled fixed point intrinsic");
  }
}
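
// For example, a call to llvm.smul.fix.i32(%a, %b, 16) -- roughly the
// fixed-point product (sext(%a) * sext(%b)) >> 16 -- is mapped to an
// ISD::SMULFIX node whose scale operand is 16.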

void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
                                                    const char *FunctionName) {
  assert(FunctionName && "FunctionName must not be nullptr");
  SDValue Callee = DAG.getExternalSymbol(
      FunctionName,
      DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
  LowerCallTo(&I, Callee, I.isTailCall());
}

/// Lower the call to the specified intrinsic function.
void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                             unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc sdl = getCurSDLoc();
  DebugLoc dl = getCurDebugLoc();
  SDValue Res;

  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return;
  case Intrinsic::vastart:  visitVAStart(I); return;
  case Intrinsic::vaend:    visitVAEnd(I); return;
  case Intrinsic::vacopy:   visitVACopy(I); return;
  case Intrinsic::returnaddress:
    setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
                             TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::addressofreturnaddress:
    setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
                             TLI.getPointerTy(DAG.getDataLayout())));
    return;
  case Intrinsic::sponentry:
    setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
                             TLI.getFrameIndexTy(DAG.getDataLayout())));
    return;
  case Intrinsic::frameaddress:
    setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
                             TLI.getFrameIndexTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::read_register: {
    Value *Reg = I.getArgOperand(0);
    SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    Res = DAG.getNode(ISD::READ_REGISTER, sdl,
                      DAG.getVTList(VT, MVT::Other), Chain, RegName);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::write_register: {
    Value *Reg = I.getArgOperand(0);
    Value *RegValue = I.getArgOperand(1);
    SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
    DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
                            RegName, getValue(RegValue)));
    return;
  }
: {
5861 const auto &MCI
= cast
<MemCpyInst
>(I
);
5862 SDValue Op1
= getValue(I
.getArgOperand(0));
5863 SDValue Op2
= getValue(I
.getArgOperand(1));
5864 SDValue Op3
= getValue(I
.getArgOperand(2));
5865 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5866 unsigned DstAlign
= std::max
<unsigned>(MCI
.getDestAlignment(), 1);
5867 unsigned SrcAlign
= std::max
<unsigned>(MCI
.getSourceAlignment(), 1);
5868 unsigned Align
= MinAlign(DstAlign
, SrcAlign
);
5869 bool isVol
= MCI
.isVolatile();
5870 bool isTC
= I
.isTailCall() && isInTailCallPosition(&I
, DAG
.getTarget());
5871 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5873 SDValue Root
= isVol
? getRoot() : getMemoryRoot();
5874 SDValue MC
= DAG
.getMemcpy(Root
, sdl
, Op1
, Op2
, Op3
, Align
, isVol
,
5876 MachinePointerInfo(I
.getArgOperand(0)),
5877 MachinePointerInfo(I
.getArgOperand(1)));
5878 updateDAGForMaybeTailCall(MC
);
5881 case Intrinsic::memset
: {
5882 const auto &MSI
= cast
<MemSetInst
>(I
);
5883 SDValue Op1
= getValue(I
.getArgOperand(0));
5884 SDValue Op2
= getValue(I
.getArgOperand(1));
5885 SDValue Op3
= getValue(I
.getArgOperand(2));
5886 // @llvm.memset defines 0 and 1 to both mean no alignment.
5887 unsigned Align
= std::max
<unsigned>(MSI
.getDestAlignment(), 1);
5888 bool isVol
= MSI
.isVolatile();
5889 bool isTC
= I
.isTailCall() && isInTailCallPosition(&I
, DAG
.getTarget());
5890 SDValue Root
= isVol
? getRoot() : getMemoryRoot();
5891 SDValue MS
= DAG
.getMemset(Root
, sdl
, Op1
, Op2
, Op3
, Align
, isVol
,
5892 isTC
, MachinePointerInfo(I
.getArgOperand(0)));
5893 updateDAGForMaybeTailCall(MS
);
5896 case Intrinsic::memmove
: {
5897 const auto &MMI
= cast
<MemMoveInst
>(I
);
5898 SDValue Op1
= getValue(I
.getArgOperand(0));
5899 SDValue Op2
= getValue(I
.getArgOperand(1));
5900 SDValue Op3
= getValue(I
.getArgOperand(2));
5901 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5902 unsigned DstAlign
= std::max
<unsigned>(MMI
.getDestAlignment(), 1);
5903 unsigned SrcAlign
= std::max
<unsigned>(MMI
.getSourceAlignment(), 1);
5904 unsigned Align
= MinAlign(DstAlign
, SrcAlign
);
5905 bool isVol
= MMI
.isVolatile();
5906 bool isTC
= I
.isTailCall() && isInTailCallPosition(&I
, DAG
.getTarget());
5907 // FIXME: Support passing different dest/src alignments to the memmove DAG
5909 SDValue Root
= isVol
? getRoot() : getMemoryRoot();
5910 SDValue MM
= DAG
.getMemmove(Root
, sdl
, Op1
, Op2
, Op3
, Align
, isVol
,
5911 isTC
, MachinePointerInfo(I
.getArgOperand(0)),
5912 MachinePointerInfo(I
.getArgOperand(1)));
5913 updateDAGForMaybeTailCall(MM
);
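  // For example, for @llvm.memcpy with destination alignment 8 and source
  // alignment 4, MinAlign(8, 4) = 4, so a single conservative alignment of 4
  // is passed to the DAG node until per-operand alignments are supported
  // (the FIXME above).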
  case Intrinsic::memcpy_element_unordered_atomic: {
    const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Src = getValue(MI.getRawSource());
    SDValue Length = getValue(MI.getLength());

    unsigned DstAlign = MI.getDestAlignment();
    unsigned SrcAlign = MI.getSourceAlignment();
    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
    SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
                                     SrcAlign, Length, LengthTy, ElemSz, isTC,
                                     MachinePointerInfo(MI.getRawDest()),
                                     MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memmove_element_unordered_atomic: {
    auto &MI = cast<AtomicMemMoveInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Src = getValue(MI.getRawSource());
    SDValue Length = getValue(MI.getLength());

    unsigned DstAlign = MI.getDestAlignment();
    unsigned SrcAlign = MI.getSourceAlignment();
    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
    SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
                                      MachinePointerInfo(MI.getRawDest()),
                                      MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::memset_element_unordered_atomic: {
    auto &MI = cast<AtomicMemSetInst>(I);
    SDValue Dst = getValue(MI.getRawDest());
    SDValue Val = getValue(MI.getValue());
    SDValue Length = getValue(MI.getLength());

    unsigned DstAlign = MI.getDestAlignment();
    Type *LengthTy = MI.getLength()->getType();
    unsigned ElemSz = MI.getElementSizeInBytes();
    bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
    SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
                                     LengthTy, ElemSz, isTC,
                                     MachinePointerInfo(MI.getRawDest()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
  case Intrinsic::dbg_addr:
  case Intrinsic::dbg_declare: {
    const auto &DI = cast<DbgVariableIntrinsic>(I);
    DILocalVariable *Variable = DI.getVariable();
    DIExpression *Expression = DI.getExpression();
    dropDanglingDebugInfo(Variable, Expression);
    assert(Variable && "Missing variable");

    // Check if address has undef value.
    const Value *Address = DI.getVariableLocation();
    if (!Address || isa<UndefValue>(Address) ||
        (Address->use_empty() && !isa<Argument>(Address))) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return;
    }

    bool isParameter = Variable->isParameter() || isa<Argument>(Address);

    // Check if this variable can be described by a frame index, typically
    // either as a static alloca or a byval parameter.
    int FI = std::numeric_limits<int>::max();
    if (const auto *AI =
            dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
      if (AI->isStaticAlloca()) {
        auto I = FuncInfo.StaticAllocaMap.find(AI);
        if (I != FuncInfo.StaticAllocaMap.end())
          FI = I->second;
      }
    } else if (const auto *Arg = dyn_cast<Argument>(
                   Address->stripInBoundsConstantOffsets())) {
      FI = FuncInfo.getArgumentFrameIndex(Arg);
    }

    // llvm.dbg.addr is control dependent and always generates indirect
    // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
    // the MachineFunction variable table.
    if (FI != std::numeric_limits<int>::max()) {
      if (Intrinsic == Intrinsic::dbg_addr) {
        SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
            Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
      }
      return;
    }

    SDValue &N = NodeMap[Address];
    if (!N.getNode() && isa<Argument>(Address))
      // Check unused arguments map.
      N = UnusedArgNodeMap[Address];
    SDDbgValue *SDV;
    if (N.getNode()) {
      if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
        Address = BCI->getOperand(0);
      // Parameters are handled specially.
      auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
      if (isParameter && FINode) {
        // Byval parameter. We have a frame index at this point.
        SDV =
            DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, dl, SDNodeOrder);
      } else if (isa<Argument>(Address)) {
        // Address is an argument, so try to emit its dbg value using
        // virtual register info from the FuncInfo.ValueMap.
        EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
        return;
      } else {
        SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                              true, dl, SDNodeOrder);
      }
      DAG.AddDbgValue(SDV, N.getNode(), isParameter);
    } else {
      // If Address is an argument then try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
                                    N)) {
        LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      }
    }
    return;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(I);
    DILabel *Label = DI.getLabel();
    assert(Label && "Missing label");

    SDDbgLabel *SDV;
    SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
    DAG.AddDbgLabel(SDV);
    return;
  }
  case Intrinsic::dbg_value: {
    const DbgValueInst &DI = cast<DbgValueInst>(I);
    assert(DI.getVariable() && "Missing variable");

    DILocalVariable *Variable = DI.getVariable();
    DIExpression *Expression = DI.getExpression();
    dropDanglingDebugInfo(Variable, Expression);
    const Value *V = DI.getValue();
    if (!V)
      return;

    if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
                         SDNodeOrder))
      return;

    // TODO: Dangling debug info will eventually either be resolved or produce
    // an Undef DBG_VALUE. However in the resolution case, a gap may appear
    // between the original dbg.value location and its resolved DBG_VALUE,
    // which we should ideally fill with an extra Undef DBG_VALUE.

    DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
    return;
  }
  case Intrinsic::eh_typeid_for: {
    // Find the type id for the given typeinfo.
    GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
    unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
    Res = DAG.getConstant(TypeID, sdl, MVT::i32);
    setValue(&I, Res);
    return;
  }

  case Intrinsic::eh_return_i32:
  case Intrinsic::eh_return_i64:
    DAG.getMachineFunction().setCallsEHReturn(true);
    DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
                            MVT::Other,
                            getControlRoot(),
                            getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1))));
    return;
  case Intrinsic::eh_unwind_init:
    DAG.getMachineFunction().setCallsUnwindInit(true);
    return;
  case Intrinsic::eh_dwarf_cfa:
    setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
                             TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::eh_sjlj_callsite: {
    MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
    assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
    assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");

    MMI.setCurrentCallSite(CI->getZExtValue());
    return;
  }
  case Intrinsic::eh_sjlj_functioncontext: {
    // Get and store the index of the function context.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    AllocaInst *FnCtx =
      cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
    int FI = FuncInfo.StaticAllocaMap[FnCtx];
    MFI.setFunctionContextIndex(FI);
    return;
  }
  case Intrinsic::eh_sjlj_setjmp: {
    SDValue Ops[2];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
                             DAG.getVTList(MVT::i32, MVT::Other), Ops);
    setValue(&I, Op.getValue(0));
    DAG.setRoot(Op.getValue(1));
    return;
  }
  case Intrinsic::eh_sjlj_longjmp:
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
                            getRoot(), getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::eh_sjlj_setup_dispatch:
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
                            getRoot()));
    return;
  case Intrinsic::masked_gather:
    visitMaskedGather(I);
    return;
  case Intrinsic::masked_load:
    visitMaskedLoad(I);
    return;
  case Intrinsic::masked_scatter:
    visitMaskedScatter(I);
    return;
  case Intrinsic::masked_store:
    visitMaskedStore(I);
    return;
  case Intrinsic::masked_expandload:
    visitMaskedLoad(I, true /* IsExpanding */);
    return;
  case Intrinsic::masked_compressstore:
    visitMaskedStore(I, true /* IsCompressing */);
    return;
  case Intrinsic::powi:
    setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1)), DAG));
    return;
  case Intrinsic::log:
    setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::log2:
    setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::log10:
    setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::exp:
    setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::exp2:
    setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::pow:
    setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
                           getValue(I.getArgOperand(1)), DAG, TLI));
    return;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize: {
    unsigned Opcode;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
    case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
    case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
    case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
    case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
    case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
    case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
    case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
    case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
    case Intrinsic::round:     Opcode = ISD::FROUND;     break;
    case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
    }

    setValue(&I, DAG.getNode(Opcode, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  }
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint: {
    unsigned Opcode;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
    case Intrinsic::llround: Opcode = ISD::LLROUND; break;
    case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
    case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
    }

    EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
                             getValue(I.getArgOperand(0))));
    return;
  }
6235 case Intrinsic::minnum
:
6236 setValue(&I
, DAG
.getNode(ISD::FMINNUM
, sdl
,
6237 getValue(I
.getArgOperand(0)).getValueType(),
6238 getValue(I
.getArgOperand(0)),
6239 getValue(I
.getArgOperand(1))));
6241 case Intrinsic::maxnum
:
6242 setValue(&I
, DAG
.getNode(ISD::FMAXNUM
, sdl
,
6243 getValue(I
.getArgOperand(0)).getValueType(),
6244 getValue(I
.getArgOperand(0)),
6245 getValue(I
.getArgOperand(1))));
6247 case Intrinsic::minimum
:
6248 setValue(&I
, DAG
.getNode(ISD::FMINIMUM
, sdl
,
6249 getValue(I
.getArgOperand(0)).getValueType(),
6250 getValue(I
.getArgOperand(0)),
6251 getValue(I
.getArgOperand(1))));
6253 case Intrinsic::maximum
:
6254 setValue(&I
, DAG
.getNode(ISD::FMAXIMUM
, sdl
,
6255 getValue(I
.getArgOperand(0)).getValueType(),
6256 getValue(I
.getArgOperand(0)),
6257 getValue(I
.getArgOperand(1))));
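  // Added semantics note: FMINNUM/FMAXNUM implement IEEE-754 (2008)
  // minNum/maxNum, which return the non-NaN operand when exactly one input
  // is NaN; FMINIMUM/FMAXIMUM implement the newer minimum/maximum
  // operations, which propagate NaN and treat -0.0 as less than +0.0.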
  case Intrinsic::copysign:
    setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
  case Intrinsic::fma:
    setValue(&I, DAG.getNode(ISD::FMA, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1)),
                             getValue(I.getArgOperand(2))));
    return;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
    return;
  case Intrinsic::fmuladd: {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
      setValue(&I, DAG.getNode(ISD::FMA, sdl,
                               getValue(I.getArgOperand(0)).getValueType(),
                               getValue(I.getArgOperand(0)),
                               getValue(I.getArgOperand(1)),
                               getValue(I.getArgOperand(2))));
    } else {
      // TODO: Intrinsic calls should have fast-math-flags.
      SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
                                getValue(I.getArgOperand(0)).getValueType(),
                                getValue(I.getArgOperand(0)),
                                getValue(I.getArgOperand(1)));
      SDValue Add = DAG.getNode(ISD::FADD, sdl,
                                getValue(I.getArgOperand(0)).getValueType(),
                                Mul,
                                getValue(I.getArgOperand(2)));
      setValue(&I, Add);
    }
    return;
  }
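  // Added note: llvm.fmuladd is the "fuse if profitable" form. A target where
  // FMA is at least as fast as separate FMUL+FADD (and where fusion is not
  // disabled via FPOpFusion::Strict) gets one fused FMA node; otherwise the
  // call decays into the unfused multiply and add built above.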
  case Intrinsic::convert_to_fp16:
    setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
                             DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
                                         getValue(I.getArgOperand(0)),
                                         DAG.getTargetConstant(0, sdl,
                                                               MVT::i32))));
    return;
  case Intrinsic::convert_from_fp16:
    setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
                             TLI.getValueType(DAG.getDataLayout(), I.getType()),
                             DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
                                         getValue(I.getArgOperand(0)))));
    return;
  case Intrinsic::pcmarker: {
    SDValue Tmp = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
    return;
  }
  case Intrinsic::readcyclecounter: {
    SDValue Op = getRoot();
    Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
                      DAG.getVTList(MVT::i64, MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::bitreverse:
    setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::bswap:
    setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::cttz: {
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
  case Intrinsic::ctlz: {
    SDValue Arg = getValue(I.getArgOperand(0));
    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
  case Intrinsic::ctpop: {
    SDValue Arg = getValue(I.getArgOperand(0));
    EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
    return;
  }
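  // Added note: the i1 second operand of llvm.cttz/llvm.ctlz is the
  // is_zero_undef flag. When true, the input is known non-zero, so the
  // cheaper *_ZERO_UNDEF node is used; e.g. on older x86 that permits a
  // bare BSF/BSR without a guarding zero check.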
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    bool IsFSHL = Intrinsic == Intrinsic::fshl;
    SDValue X = getValue(I.getArgOperand(0));
    SDValue Y = getValue(I.getArgOperand(1));
    SDValue Z = getValue(I.getArgOperand(2));
    EVT VT = X.getValueType();
    SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
    SDValue Zero = DAG.getConstant(0, sdl, VT);
    SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);

    auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
    if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
      setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
      return;
    }

    // When X == Y, this is rotate. If the data type has a power-of-2 size, we
    // avoid the select that is necessary in the general case to filter out
    // the 0-shift possibility that leads to UB.
    if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
      auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
        setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
        return;
      }

      // Some targets only rotate one way. Try the opposite direction.
      RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
      if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
        // Negate the shift amount because it is safe to ignore the high bits.
        SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
        setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
        return;
      }

      // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
      // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
      SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
      SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
      SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
      SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
      setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
      return;
    }

    // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
    // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
    SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
    SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
    SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
    SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);

    // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
    // and that is undefined. We must compare and select to avoid UB.
    EVT CCVT = MVT::i1;
    if (VT.isVector())
      CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());

    // For fshl, 0-shift returns the 1st arg (X).
    // For fshr, 0-shift returns the 2nd arg (Y).
    SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
    setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
    return;
  }
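  // Added worked example: for `fshl i8 %x, %y, i8 3` on a target without
  // FSHL/ROTL, the general expansion above computes
  //   ShAmt = 3 % 8 = 3,  InvShAmt = 8 - 3 = 5,  Or = (X << 3) | (Y >> 5)
  // and the SETEQ/select pair exists because a shift amount of 0 would turn
  // the opposite-direction shift into an undefined shift-by-bitwidth.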
  case Intrinsic::sadd_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::uadd_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::ssub_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
  case Intrinsic::usub_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
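  // Added worked example: llvm.sadd.sat.i8(100, 100) yields 127 and
  // llvm.usub.sat.i8(10, 20) yields 0. The saturation lives in the
  // SADDSAT/USUBSAT nodes themselves; targets without native support
  // typically legalize them to an overflow-aware add/sub plus a select
  // against the type's min/max.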
  case Intrinsic::smul_fix:
  case Intrinsic::umul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                             Op1.getValueType(), Op1, Op2, Op3));
    return;
  }
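  // Added note: these are fixed-point multiplies. The third operand is the
  // scale N, and the result is conceptually (A * B) >> N computed in double
  // width; e.g. two Q0.7 values in i8 multiply with scale 7. The *_sat
  // variants clamp instead of wrapping when the product overflows.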
  case Intrinsic::sdiv_fix:
  case Intrinsic::udiv_fix: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                              Op1, Op2, Op3, DAG, TLI));
    return;
  }
  case Intrinsic::stacksave: {
    SDValue Op = getRoot();
    Res = DAG.getNode(
        ISD::STACKSAVE, sdl,
        DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::stackrestore:
    Res = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
    return;
  case Intrinsic::get_dynamic_area_offset: {
    SDValue Op = getRoot();
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
    // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
    // target.
    if (PtrTy.getSizeInBits() < ResTy.getSizeInBits())
      report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
                         " intrinsic!");
    Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
                      Op);
    DAG.setRoot(Op);
    setValue(&I, Res);
    return;
  }
  case Intrinsic::stackguard: {
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    MachineFunction &MF = DAG.getMachineFunction();
    const Module &M = *MF.getFunction().getParent();
    SDValue Chain = getRoot();
    if (TLI.useLoadStackGuardNode()) {
      Res = getLoadStackGuard(DAG, sdl, Chain);
    } else {
      const Value *Global = TLI.getSDagStackGuard(M);
      unsigned Align = DL->getPrefTypeAlignment(Global->getType());
      Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
                        MachinePointerInfo(Global, 0), Align,
                        MachineMemOperand::MOVolatile);
    }
    if (TLI.useStackGuardXorFP())
      Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
    DAG.setRoot(Chain);
    setValue(&I, Res);
    return;
  }
  case Intrinsic::stackprotector: {
    // Emit code into the DAG to store the stack guard onto the stack.
    MachineFunction &MF = DAG.getMachineFunction();
    MachineFrameInfo &MFI = MF.getFrameInfo();
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    SDValue Src, Chain = getRoot();

    if (TLI.useLoadStackGuardNode())
      Src = getLoadStackGuard(DAG, sdl, Chain);
    else
      Src = getValue(I.getArgOperand(0));   // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));

    int FI = FuncInfo.StaticAllocaMap[Slot];
    MFI.setStackProtectorIndex(FI);

    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);

    // Store the stack protector onto the stack.
    Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
                                                 DAG.getMachineFunction(), FI),
                       /* Alignment = */ 0, MachineMemOperand::MOVolatile);
    setValue(&I, Res);
    DAG.setRoot(Res);
    return;
  }
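  // Added note (hedged): the guard store above is tagged MOVolatile, which
  // should keep later DAG combines and dead-store elimination from deleting
  // or reordering a store whose value is never re-read on the normal path.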
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    // Drop the intrinsic, but forward the value.
    setValue(&I, getValue(I.getOperand(0)));
    return;

  case Intrinsic::assume:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return;

  case Intrinsic::codeview_annotation: {
    // Emit a label associated with this metadata.
    MachineFunction &MF = DAG.getMachineFunction();
    MCSymbol *Label =
        MF.getMMI().getContext().createTempSymbol("annotation", true);
    Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
    MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
    Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
    DAG.setRoot(Res);
    return;
  }

  case Intrinsic::init_trampoline: {
    const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());

    SDValue Ops[6];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    Ops[2] = getValue(I.getArgOperand(1));
    Ops[3] = getValue(I.getArgOperand(2));
    Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
    Ops[5] = DAG.getSrcValue(F);

    Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);

    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::adjust_trampoline:
    setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
                             TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::gcroot: {
    assert(DAG.getMachineFunction().getFunction().hasGC() &&
           "only valid in functions with gc specified, enforced by Verifier");
    assert(GFI && "implied by previous");
    const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
    const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));

    FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
    GFI->addStackRoot(FI->getIndex(), TypeMap);
    return;
  }
  case Intrinsic::gcread:
  case Intrinsic::gcwrite:
    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
  case Intrinsic::flt_rounds:
    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
    return;

  case Intrinsic::expect:
    // Just replace __builtin_expect(exp, c) with EXP.
    setValue(&I, getValue(I.getArgOperand(0)));
    return;

  case Intrinsic::debugtrap:
  case Intrinsic::trap: {
    StringRef TrapFuncName =
        I.getAttributes()
            .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
            .getValueAsString();
    if (TrapFuncName.empty()) {
      ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
        ISD::TRAP : ISD::DEBUGTRAP;
      DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
      return;
    }
    TargetLowering::ArgListTy Args;

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
        CallingConv::C, I.getType(),
        DAG.getExternalSymbol(TrapFuncName.data(),
                              TLI.getPointerTy(DAG.getDataLayout())),
        std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow: {
    ISD::NodeType Op;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
    case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
    case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
    case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
    case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
    case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
    }
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));

    EVT ResultVT = Op1.getValueType();
    EVT OverflowVT = MVT::i1;
    if (ResultVT.isVector())
      OverflowVT = EVT::getVectorVT(
          *Context, OverflowVT, ResultVT.getVectorNumElements());

    SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
    setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
    return;
  }
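  // Added worked example: llvm.uadd.with.overflow.i32(a, b) becomes a single
  // UADDO node with the value list (i32, i1): result 0 is the wrapping sum
  // and result 1 is the carry bit that extractvalue users pick apart. On most
  // targets this lowers to an ordinary add that also sets a flags register.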
  case Intrinsic::prefetch: {
    SDValue Ops[5];
    unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
    auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
    Ops[0] = DAG.getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    Ops[2] = getValue(I.getArgOperand(1));
    Ops[3] = getValue(I.getArgOperand(2));
    Ops[4] = getValue(I.getArgOperand(3));
    SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
                                             DAG.getVTList(MVT::Other), Ops,
                                             EVT::getIntegerVT(*Context, 8),
                                             MachinePointerInfo(I.getArgOperand(0)),
                                             0, /* align */
                                             Flags);

    // Chain the prefetch in parallel with any pending loads, to stay out of
    // the way of later optimizations.
    PendingLoads.push_back(Result);
    Result = getRoot();
    DAG.setRoot(Result);
    return;
  }
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
    // Stack coloring is not enabled in O0, discard region information.
    if (TM.getOptLevel() == CodeGenOpt::None)
      return;

    const int64_t ObjectSize =
        cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
    Value *const ObjectPtr = I.getArgOperand(1);
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(ObjectPtr, Allocas, *DL);

    for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
         E = Allocas.end(); Object != E; ++Object) {
      const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);

      // Could not find an Alloca.
      if (!LifetimeObject)
        continue;

      // First check that the Alloca is static, otherwise it won't have a
      // valid frame index.
      auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
      if (SI == FuncInfo.StaticAllocaMap.end())
        return;

      const int FrameIndex = SI->second;
      int64_t Offset;
      if (GetPointerBaseWithConstantOffset(
              ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
        Offset = -1; // Cannot determine offset from alloca to lifetime object.
      Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
                                Offset);
      DAG.setRoot(Res);
    }
    return;
  }
  case Intrinsic::invariant_start:
    // Discard region information.
    setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
    return;
  case Intrinsic::invariant_end:
    // Discard region information.
    return;
  case Intrinsic::clear_cache:
    /// FunctionName may be null.
    if (const char *FunctionName = TLI.getClearCacheBuiltinName())
      lowerCallToExternalSymbol(I, FunctionName);
    return;
  case Intrinsic::donothing:
    // ignore
    return;
  case Intrinsic::experimental_stackmap:
    visitStackmap(I);
    return;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    visitPatchpoint(&I);
    return;
  case Intrinsic::experimental_gc_statepoint:
    LowerStatepoint(ImmutableStatepoint(&I));
    return;
  case Intrinsic::experimental_gc_result:
    visitGCResult(cast<GCResultInst>(I));
    return;
  case Intrinsic::experimental_gc_relocate:
    visitGCRelocate(cast<GCRelocateInst>(I));
    return;
  case Intrinsic::instrprof_increment:
    llvm_unreachable("instrprof failed to lower an increment");
  case Intrinsic::instrprof_value_profile:
    llvm_unreachable("instrprof failed to lower a value profiling call");
  case Intrinsic::localescape: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
      Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.
      AllocaInst *Slot = cast<AllocaInst>(Arg);
      assert(FuncInfo.StaticAllocaMap.count(Slot) &&
             "can only escape static allocas");
      int FI = FuncInfo.StaticAllocaMap[Slot];
      MCSymbol *FrameAllocSym =
          MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
              GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
              TII->get(TargetOpcode::LOCAL_ESCAPE))
          .addSym(FrameAllocSym)
          .addFrameIndex(FI);
    }

    return;
  }
  case Intrinsic::localrecover: {
    // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
    MachineFunction &MF = DAG.getMachineFunction();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);

    // Get the symbol that defines the frame offset.
    auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
    auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
    unsigned IdxVal =
        unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
    MCSymbol *FrameAllocSym =
        MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
            GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);

    // Create a MCSymbol for the label to avoid any target lowering
    // that would make this PC relative.
    SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
    SDValue OffsetVal =
        DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);

    // Add the offset to the FP.
    Value *FP = I.getArgOperand(1);
    SDValue FPVal = getValue(FP);
    SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
    setValue(&I, Add);

    return;
  }
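  // Added note: localescape/localrecover work as a pair. LOCAL_ESCAPE in the
  // parent pins assembler labels to the frame offsets of the escaped allocas;
  // localrecover in the outlined (EH) child reads that offset back through
  // the LOCAL_RECOVER node above and adds it to the parent's frame pointer to
  // rebuild a pointer to the same stack slot.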
  case Intrinsic::eh_exceptionpointer:
  case Intrinsic::eh_exceptioncode: {
    // Get the exception pointer vreg, copy from it, and resize it to fit.
    const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
    unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
    SDValue N =
        DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
    if (Intrinsic == Intrinsic::eh_exceptioncode)
      N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
    setValue(&I, N);
    return;
  }
  case Intrinsic::xray_customevent: {
    // Here we want to make sure that the intrinsic behaves as if it has a
    // specific calling convention, and only for x86_64.
    // FIXME: Support other platforms later.
    const auto &Triple = DAG.getTarget().getTargetTriple();
    if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
      return;

    SDLoc DL = getCurSDLoc();
    SmallVector<SDValue, 8> Ops;

    // We want to say that we always want the arguments in registers.
    SDValue LogEntryVal = getValue(I.getArgOperand(0));
    SDValue StrSizeVal = getValue(I.getArgOperand(1));
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Chain = getRoot();
    Ops.push_back(LogEntryVal);
    Ops.push_back(StrSizeVal);
    Ops.push_back(Chain);

    // We need to enforce the calling convention for the callsite, so that
    // argument ordering is enforced correctly, and that register allocation can
    // see that some registers may be assumed clobbered and have to preserve
    // them across calls to the intrinsic.
    MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
                                           DL, NodeTys, Ops);
    SDValue patchableNode = SDValue(MN, 0);
    DAG.setRoot(patchableNode);
    setValue(&I, patchableNode);
    return;
  }
  case Intrinsic::xray_typedevent: {
    // Here we want to make sure that the intrinsic behaves as if it has a
    // specific calling convention, and only for x86_64.
    // FIXME: Support other platforms later.
    const auto &Triple = DAG.getTarget().getTargetTriple();
    if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
      return;

    SDLoc DL = getCurSDLoc();
    SmallVector<SDValue, 8> Ops;

    // We want to say that we always want the arguments in registers.
    // It's unclear to me how manipulating the selection DAG here forces callers
    // to provide arguments in registers instead of on the stack.
    SDValue LogTypeId = getValue(I.getArgOperand(0));
    SDValue LogEntryVal = getValue(I.getArgOperand(1));
    SDValue StrSizeVal = getValue(I.getArgOperand(2));
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Chain = getRoot();
    Ops.push_back(LogTypeId);
    Ops.push_back(LogEntryVal);
    Ops.push_back(StrSizeVal);
    Ops.push_back(Chain);

    // We need to enforce the calling convention for the callsite, so that
    // argument ordering is enforced correctly, and that register allocation can
    // see that some registers may be assumed clobbered and have to preserve
    // them across calls to the intrinsic.
    MachineSDNode *MN = DAG.getMachineNode(
        TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
    SDValue patchableNode = SDValue(MN, 0);
    DAG.setRoot(patchableNode);
    setValue(&I, patchableNode);
    return;
  }
  case Intrinsic::experimental_deoptimize:
    LowerDeoptimizeCall(&I);
    return;

  case Intrinsic::experimental_vector_reduce_v2_fadd:
  case Intrinsic::experimental_vector_reduce_v2_fmul:
  case Intrinsic::experimental_vector_reduce_add:
  case Intrinsic::experimental_vector_reduce_mul:
  case Intrinsic::experimental_vector_reduce_and:
  case Intrinsic::experimental_vector_reduce_or:
  case Intrinsic::experimental_vector_reduce_xor:
  case Intrinsic::experimental_vector_reduce_smax:
  case Intrinsic::experimental_vector_reduce_smin:
  case Intrinsic::experimental_vector_reduce_umax:
  case Intrinsic::experimental_vector_reduce_umin:
  case Intrinsic::experimental_vector_reduce_fmax:
  case Intrinsic::experimental_vector_reduce_fmin:
    visitVectorReduce(I, Intrinsic);
    return;

  case Intrinsic::icall_branch_funnel: {
    SmallVector<SDValue, 16> Ops;
    Ops.push_back(getValue(I.getArgOperand(0)));

    int64_t Offset;
    auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
        I.getArgOperand(1), Offset, DAG.getDataLayout()));
    if (!Base)
      report_fatal_error(
          "llvm.icall.branch.funnel operand must be a GlobalValue");
    Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));

    struct BranchFunnelTarget {
      int64_t Offset;
      SDValue Target;
    };
    SmallVector<BranchFunnelTarget, 8> Targets;

    for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
      auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
          I.getArgOperand(Op), Offset, DAG.getDataLayout()));
      if (ElemBase != Base)
        report_fatal_error("all llvm.icall.branch.funnel operands must refer "
                           "to the same GlobalValue");

      SDValue Val = getValue(I.getArgOperand(Op + 1));
      auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
      if (!GA)
        report_fatal_error(
            "llvm.icall.branch.funnel operand must be a GlobalValue");
      Targets.push_back({Offset, DAG.getTargetGlobalAddress(
                                     GA->getGlobal(), getCurSDLoc(),
                                     Val.getValueType(), GA->getOffset())});
    }

    llvm::sort(Targets,
               [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
                 return T1.Offset < T2.Offset;
               });

    for (auto &T : Targets) {
      Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
      Ops.push_back(T.Target);
    }

    Ops.push_back(DAG.getRoot()); // Chain
    SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
                                 getCurSDLoc(), MVT::Other, Ops),
              0);
    DAG.setRoot(N);
    setValue(&I, N);
    HasTailCall = true;
    return;
  }

  case Intrinsic::wasm_landingpad_index:
    // Information this intrinsic contained has been transferred to
    // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
    // delete it now.
    return;

  case Intrinsic::aarch64_settag:
  case Intrinsic::aarch64_settag_zero: {
    const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
    bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
    SDValue Val = TSI.EmitTargetCodeForSetTag(
        DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
        getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
        ZeroMemory);
    DAG.setRoot(Val);
    setValue(&I, Val);
    return;
  }
  case Intrinsic::ptrmask: {
    SDValue Ptr = getValue(I.getOperand(0));
    SDValue Const = getValue(I.getOperand(1));

    EVT DestVT =
        EVT(DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));

    setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), DestVT, Ptr,
                             DAG.getZExtOrTrunc(Const, getCurSDLoc(), DestVT)));
    return;
  }
  }
}
void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI) {
  SDLoc sdl = getCurSDLoc();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
  ValueVTs.push_back(MVT::Other); // Out chain

  // We do not need to serialize constrained FP intrinsics against
  // each other or against (nonvolatile) loads, so they can be
  // chained like loads.
  SDValue Chain = DAG.getRoot();
  SmallVector<SDValue, 4> Opers;
  Opers.push_back(Chain);
  if (FPI.isUnaryOp()) {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
  } else if (FPI.isTernaryOp()) {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
    Opers.push_back(getValue(FPI.getArgOperand(1)));
    Opers.push_back(getValue(FPI.getArgOperand(2)));
  } else {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
    Opers.push_back(getValue(FPI.getArgOperand(1)));
  }

  unsigned Opcode;
  switch (FPI.getIntrinsicID()) {
  default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case Intrinsic::INTRINSIC:                                                   \
    Opcode = ISD::STRICT_##DAGN;                                               \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }

  // A few strict DAG nodes carry additional operands that are not
  // set up by the default code above.
  switch (Opcode) {
  default: break;
  case ISD::STRICT_FP_ROUND:
    Opers.push_back(
        DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
    break;
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
    Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
    break;
  }
  }

  SDVTList VTs = DAG.getVTList(ValueVTs);
  SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers);

  assert(Result.getNode()->getNumValues() == 2);

  // Push node to the appropriate list so that future instructions can be
  // chained up correctly.
  SDValue OutChain = Result.getValue(1);
  switch (FPI.getExceptionBehavior().getValue()) {
  case fp::ExceptionBehavior::ebIgnore:
    // The only reason why ebIgnore nodes still need to be chained is that
    // they might depend on the current rounding mode, and therefore must
    // not be moved across instruction that may change that mode.
    LLVM_FALLTHROUGH;
  case fp::ExceptionBehavior::ebMayTrap:
    // These must not be moved across calls or instructions that may change
    // floating-point exception masks.
    PendingConstrainedFP.push_back(OutChain);
    break;
  case fp::ExceptionBehavior::ebStrict:
    // These must not be moved across calls or instructions that may change
    // floating-point exception masks or read floating-point exception flags.
    // In addition, they cannot be optimized out even if unused.
    PendingConstrainedFPStrict.push_back(OutChain);
    break;
  }

  SDValue FPResult = Result.getValue(0);
  setValue(&FPI, FPResult);
}
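// Added worked example: an IR call such as
//   %r = call float @llvm.experimental.constrained.fadd.f32(
//            float %a, float %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
// becomes a STRICT_FADD node here, and its out-chain is queued on
// PendingConstrainedFPStrict, so it can neither move across code that touches
// the FP environment nor be dropped when its result is unused; with
// "fpexcept.ignore" only the weaker rounding-mode ordering is kept.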
std::pair<SDValue, SDValue>
SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                                    const BasicBlock *EHPadBB) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  MCSymbol *BeginLabel = nullptr;
  if (EHPadBB) {
    // Insert a label before the invoke call to mark the try range. This can be
    // used to detect deletion of the invoke via the MachineModuleInfo.
    BeginLabel = MMI.getContext().createTempSymbol();

    // For SjLj, keep track of which landing pads go with which invokes
    // so as to maintain the ordering of pads in the LSDA.
    unsigned CallSiteIndex = MMI.getCurrentCallSite();
    if (CallSiteIndex) {
      MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
      LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);

      // Now that the call site is handled, stop tracking it.
      MMI.setCurrentCallSite(0);
    }

    // Both PendingLoads and PendingExports must be flushed here;
    // this call might not return.
    (void)getRoot();
    DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));

    CLI.setChain(getRoot());
  }
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);

  assert((CLI.IsTailCall || Result.second.getNode()) &&
         "Non-null chain expected with non-tail call!");
  assert((Result.second.getNode() || !Result.first.getNode()) &&
         "Null value expected with tail call!");

  if (!Result.second.getNode()) {
    // As a special case, a null chain means that a tail call has been emitted
    // and the DAG root is already updated.
    HasTailCall = true;

    // Since there's no actual continuation from this block, nothing can be
    // relying on us setting vregs for them.
    PendingExports.clear();
  } else {
    DAG.setRoot(Result.second);
  }

  if (EHPadBB) {
    // Insert a label at the end of the invoke call to mark the try range. This
    // can be used to detect deletion of the invoke via the MachineModuleInfo.
    MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
    DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));

    // Inform MachineModuleInfo of range.
    auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
    // There is a platform (e.g. wasm) that uses funclet style IR but does not
    // actually use outlined funclets and their LSDA info style.
    if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
      assert(CLI.CS);
      WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
      EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
                                BeginLabel, EndLabel);
    } else if (!isScopedEHPersonality(Pers)) {
      MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
    }
  }

  return Result;
}
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
                                      bool isTailCall,
                                      const BasicBlock *EHPadBB) {
  auto &DL = DAG.getDataLayout();
  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  TargetLowering::ArgListTy Args;
  Args.reserve(CS.arg_size());

  const Value *SwiftErrorVal = nullptr;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (isTailCall) {
    // Avoid emitting tail calls in functions with the disable-tail-calls
    // attribute.
    auto *Caller = CS.getInstruction()->getParent()->getParent();
    if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
        "true")
      isTailCall = false;

    // We can't tail call inside a function with a swifterror argument. Lowering
    // does not support this yet. It would have to move into the swifterror
    // register before the call.
    if (TLI.supportSwiftError() &&
        Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
      isTailCall = false;
  }

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    TargetLowering::ArgListEntry Entry;
    const Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    SDValue ArgNode = getValue(V);
    Entry.Node = ArgNode; Entry.Ty = V->getType();

    Entry.setAttributes(&CS, i - CS.arg_begin());

    // Use swifterror virtual register as input to the call.
    if (Entry.IsSwiftError && TLI.supportSwiftError()) {
      SwiftErrorVal = V;
      // We find the virtual register for the actual swifterror argument.
      // Instead of using the Value, we use the virtual register instead.
      Entry.Node = DAG.getRegister(
          SwiftError.getOrCreateVRegUseAt(CS.getInstruction(), FuncInfo.MBB, V),
          EVT(TLI.getPointerTy(DL)));
    }

    Args.push_back(Entry);

    // If we have an explicit sret argument that is an Instruction, (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (Entry.IsSRet && isa<Instruction>(V))
      isTailCall = false;
  }

  // If call site has a cfguardtarget operand bundle, create and add an
  // additional ArgListEntry.
  if (auto Bundle = CS.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
    TargetLowering::ArgListEntry Entry;
    Value *V = Bundle->Inputs[0];
    SDValue ArgNode = getValue(V);
    Entry.Node = ArgNode;
    Entry.Ty = V->getType();
    Entry.IsCFGuardTarget = true;
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within TLI->LowerCallTo.
  if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
    isTailCall = false;

  // Disable tail calls if there is a swifterror argument. Targets have not
  // been updated to support tail calls.
  if (TLI.supportSwiftError() && SwiftErrorVal)
    isTailCall = false;

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
      .setTailCall(isTailCall)
      .setConvergent(CS.isConvergent());
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  if (Result.first.getNode()) {
    const Instruction *Inst = CS.getInstruction();
    Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
    setValue(Inst, Result.first);
  }

  // The last element of CLI.InVals has the SDValue for swifterror return.
  // Here we copy it to a virtual register and update SwiftErrorMap for
  // use.
  if (SwiftErrorVal && TLI.supportSwiftError()) {
    // Get the last element of InVals.
    SDValue Src = CLI.InVals.back();
    Register VReg = SwiftError.getOrCreateVRegDefAt(
        CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal);
    SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
    DAG.setRoot(CopyNode);
  }
}
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
                             SelectionDAGBuilder &Builder) {
  // Check to see if this load can be trivially constant folded, e.g. if the
  // input is from a string literal.
  if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
    // Cast pointer to the type we really want to load.
    Type *LoadTy =
        Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
    if (LoadVT.isVector())
      LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());

    LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
                                         PointerType::getUnqual(LoadTy));

    if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
            const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
      return Builder.getValue(LoadCst);
  }

  // Otherwise, we have to emit the load. If the pointer is to unfoldable but
  // still constant memory, the input chain can be the entry node.
  SDValue Root;
  bool ConstantMemory = false;

  // Do not serialize (non-volatile) loads of constant memory with anything.
  if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
    Root = Builder.DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = Builder.DAG.getRoot();
  }

  SDValue Ptr = Builder.getValue(PtrVal);
  SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
                                        Ptr, MachinePointerInfo(PtrVal),
                                        /* Alignment = */ 1);

  if (!ConstantMemory)
    Builder.PendingLoads.push_back(LoadVal.getValue(1));
  return LoadVal;
}

/// Record the value for an instruction that produces an integer result,
/// converting the type where necessary.
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
                                                  SDValue Value,
                                                  bool IsSigned) {
  EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                    I.getType(), true);
  if (IsSigned)
    Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
  else
    Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
  setValue(&I, Value);
}
/// See if we can lower a memcmp call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
  const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
  const Value *Size = I.getArgOperand(2);
  const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
  if (CSize && CSize->getZExtValue() == 0) {
    EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                          I.getType(), true);
    setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
    return true;
  }

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
      DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
      getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }

  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
  if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
    return false;

  // If the target has a fast compare for the given size, it will return a
  // preferred load type for that size. Require that the load VT is legal and
  // that the target supports unaligned loads of that type. Otherwise, return
  // INVALID.
  auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT LVT = TLI.hasFastEqualityCompare(NumBits);
    if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
      // TODO: Check alignment of src and dest ptrs.
      unsigned DstAS = LHS->getType()->getPointerAddressSpace();
      unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
      if (!TLI.isTypeLegal(LVT) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
        LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
    }

    return LVT;
  };

  // This turns into unaligned loads. We only do this if the target natively
  // supports the MVT we'll be loading or if it is small enough (<= 4) that
  // we'll only produce a small number of byte loads.
  MVT LoadVT;
  unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
  switch (NumBitsToCompare) {
  default:
    return false;
  case 16:
    LoadVT = MVT::i16;
    break;
  case 32:
    LoadVT = MVT::i32;
    break;
  case 64:
  case 128:
  case 256:
    LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
    break;
  }

  if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
    return false;

  SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
  SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);

  // Bitcast to a wide integer type if the loads are vectors.
  if (LoadVT.isVector()) {
    EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
    LoadL = DAG.getBitcast(CmpVT, LoadL);
    LoadR = DAG.getBitcast(CmpVT, LoadR);
  }

  SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
  processIntegerCallValue(I, Cmp, false);
  return true;
}
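// Added worked example: for `memcmp(p, q, 4) == 0` on a target that tolerates
// misaligned i32 loads, the code above loads one i32 through each pointer and
// emits a single SETNE, eliminating the libcall. Restricting this to
// zero-equality users is what makes it legal to drop the <0 / >0 ordering
// information a real memcmp would return.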
/// See if we can lower a memchr call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
  const Value *Src = I.getArgOperand(0);
  const Value *Char = I.getArgOperand(1);
  const Value *Length = I.getArgOperand(2);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Src), getValue(Char), getValue(Length),
                                MachinePointerInfo(Src));
  if (Res.first.getNode()) {
    setValue(&I, Res.first);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a mempcpy call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
  SDValue Dst = getValue(I.getArgOperand(0));
  SDValue Src = getValue(I.getArgOperand(1));
  SDValue Size = getValue(I.getArgOperand(2));

  unsigned DstAlign = DAG.InferPtrAlignment(Dst);
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  unsigned Align = std::min(DstAlign, SrcAlign);
  if (Align == 0) // Alignment of one or both could not be inferred.
    Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.

  bool isVol = false;
  SDLoc sdl = getCurSDLoc();

  // In the mempcpy context we need to pass in a false value for isTailCall
  // because the return pointer needs to be adjusted by the size of
  // the copied memory.
  SDValue Root = isVol ? getRoot() : getMemoryRoot();
  SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Align, isVol,
                             false, /*isTailCall=*/false,
                             MachinePointerInfo(I.getArgOperand(0)),
                             MachinePointerInfo(I.getArgOperand(1)));
  assert(MC.getNode() != nullptr &&
         "** memcpy should not be lowered as TailCall in mempcpy context **");
  DAG.setRoot(MC);

  // Check if Size needs to be truncated or extended.
  Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());

  // Adjust return pointer to point just past the last dst byte.
  SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
                                    Dst, Size);
  setValue(&I, DstPlusSize);
  return true;
}
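// Added note: mempcpy differs from memcpy only in returning dst + n rather
// than dst, which is why the lowering above reuses the memcpy node and then
// materializes the ADD of the (possibly re-typed) size onto Dst.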
/// See if we can lower a strcpy call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
                                getValue(Arg0), getValue(Arg1),
                                MachinePointerInfo(Arg0),
                                MachinePointerInfo(Arg1), isStpcpy);
  if (Res.first.getNode()) {
    setValue(&I, Res.first);
    DAG.setRoot(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strcmp call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Arg0), getValue(Arg1),
                                MachinePointerInfo(Arg0),
                                MachinePointerInfo(Arg1));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Arg0), MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strnlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                 getValue(Arg0), getValue(Arg1),
                                 MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a unary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it, otherwise return
/// false and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                              unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp = getValue(I.getArgOperand(0));
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
  return true;
}

/// See if we can lower a binary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it. Otherwise return
/// false, and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
                                               unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp0 = getValue(I.getArgOperand(0));
  SDValue Tmp1 = getValue(I.getArgOperand(1));
  EVT VT = Tmp0.getValueType();
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
  return true;
}
void SelectionDAGBuilder::visitCall(const CallInst &I) {
  // Handle inline assembly differently.
  if (isa<InlineAsm>(I.getCalledValue())) {
    visitInlineAsm(&I);
    return;
  }

  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      // Is this an LLVM intrinsic or a target-specific intrinsic?
      unsigned IID = F->getIntrinsicID();
      if (!IID)
        if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
          IID = II->getIntrinsicID(F);

      if (IID) {
        visitIntrinsicCall(I, IID);
        return;
      }
    }

    // Check for well-known libc/libm calls. If the function is internal, it
    // can't be a library call. Don't do the check if marked as nobuiltin for
    // some reason or the call site requires strict floating point semantics.
    LibFunc Func;
    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
        LibInfo->hasOptimizedCodeGen(Func)) {
      switch (Func) {
      default: break;
      case LibFunc_copysign:
      case LibFunc_copysignf:
      case LibFunc_copysignl:
        // We already checked this call's prototype; verify it doesn't modify
        // errno.
        if (I.onlyReadsMemory()) {
          SDValue LHS = getValue(I.getArgOperand(0));
          SDValue RHS = getValue(I.getArgOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
        break;
      case LibFunc_fabs:
      case LibFunc_fabsf:
      case LibFunc_fabsl:
        if (visitUnaryFloatCall(I, ISD::FABS))
          return;
        break;
      case LibFunc_fmin:
      case LibFunc_fminf:
      case LibFunc_fminl:
        if (visitBinaryFloatCall(I, ISD::FMINNUM))
          return;
        break;
      case LibFunc_fmax:
      case LibFunc_fmaxf:
      case LibFunc_fmaxl:
        if (visitBinaryFloatCall(I, ISD::FMAXNUM))
          return;
        break;
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_sinl:
        if (visitUnaryFloatCall(I, ISD::FSIN))
          return;
        break;
      case LibFunc_cos:
      case LibFunc_cosf:
      case LibFunc_cosl:
        if (visitUnaryFloatCall(I, ISD::FCOS))
          return;
        break;
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
      case LibFunc_sqrtl:
      case LibFunc_sqrt_finite:
      case LibFunc_sqrtf_finite:
      case LibFunc_sqrtl_finite:
        if (visitUnaryFloatCall(I, ISD::FSQRT))
          return;
        break;
      case LibFunc_floor:
      case LibFunc_floorf:
      case LibFunc_floorl:
        if (visitUnaryFloatCall(I, ISD::FFLOOR))
          return;
        break;
      case LibFunc_nearbyint:
      case LibFunc_nearbyintf:
      case LibFunc_nearbyintl:
        if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
          return;
        break;
      case LibFunc_ceil:
      case LibFunc_ceilf:
      case LibFunc_ceill:
        if (visitUnaryFloatCall(I, ISD::FCEIL))
          return;
        break;
      case LibFunc_rint:
      case LibFunc_rintf:
      case LibFunc_rintl:
        if (visitUnaryFloatCall(I, ISD::FRINT))
          return;
        break;
      case LibFunc_round:
      case LibFunc_roundf:
      case LibFunc_roundl:
        if (visitUnaryFloatCall(I, ISD::FROUND))
          return;
        break;
      case LibFunc_trunc:
      case LibFunc_truncf:
      case LibFunc_truncl:
        if (visitUnaryFloatCall(I, ISD::FTRUNC))
          return;
        break;
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log2l:
        if (visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  // CFGuardTarget bundles are lowered in LowerCallTo.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower calls with arbitrary operand bundles!");

  SDValue Callee = getValue(I.getCalledValue());

  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
    LowerCallTo(&I, Callee, I.isTailCall());
}
7799 /// AsmOperandInfo - This contains information for each constraint that we are
7801 class SDISelAsmOperandInfo
: public TargetLowering::AsmOperandInfo
{
7803 /// CallOperand - If this is the result output operand or a clobber
7804 /// this is null, otherwise it is the incoming operand to the CallInst.
7805 /// This gets modified as the asm is processed.
7806 SDValue CallOperand
;
7808 /// AssignedRegs - If this is a register or register class operand, this
7809 /// contains the set of register corresponding to the operand.
7810 RegsForValue AssignedRegs
;
7812 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo
&info
)
7813 : TargetLowering::AsmOperandInfo(info
), CallOperand(nullptr, 0) {
7816 /// Whether or not this operand accesses memory
7817 bool hasMemory(const TargetLowering
&TLI
) const {
7818 // Indirect operand accesses access memory.
7822 for (const auto &Code
: Codes
)
7823 if (TLI
.getConstraintType(Code
) == TargetLowering::C_Memory
)
7829 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7830 /// corresponds to. If there is no Value* for this operand, it returns
7832 EVT
getCallOperandValEVT(LLVMContext
&Context
, const TargetLowering
&TLI
,
7833 const DataLayout
&DL
) const {
7834 if (!CallOperandVal
) return MVT::Other
;
7836 if (isa
<BasicBlock
>(CallOperandVal
))
7837 return TLI
.getPointerTy(DL
);
7839 llvm::Type
*OpTy
= CallOperandVal
->getType();
7841 // FIXME: code duplicated from TargetLowering::ParseConstraints().
7842 // If this is an indirect operand, the operand is a pointer to the
7845 PointerType
*PtrTy
= dyn_cast
<PointerType
>(OpTy
);
7847 report_fatal_error("Indirect operand for inline asm not a pointer!");
7848 OpTy
= PtrTy
->getElementType();
7851 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
7852 if (StructType
*STy
= dyn_cast
<StructType
>(OpTy
))
7853 if (STy
->getNumElements() == 1)
7854 OpTy
= STy
->getElementType(0);
7856 // If OpTy is not a single value, it may be a struct/union that we
7857 // can tile with integers.
7858 if (!OpTy
->isSingleValueType() && OpTy
->isSized()) {
7859 unsigned BitSize
= DL
.getTypeSizeInBits(OpTy
);
7868 OpTy
= IntegerType::get(Context
, BitSize
);
7873 return TLI
.getValueType(DL
, OpTy
, true);
7877 using SDISelAsmOperandInfoVector
= SmallVector
<SDISelAsmOperandInfo
, 16>;
7879 } // end anonymous namespace
/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  const auto &TLI = DAG.getTargetLoweringInfo();

  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  if ((OpInfo.ConstraintVT.isInteger() !=
       MatchingOpInfo.ConstraintVT.isInteger()) ||
      (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}
/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constpool if we can,
  // otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants. Also, this may take
  // an additional register for the computation and we don't want that
  // either.

  // If the operand is a float, integer, or vector constant, spill to a
  // constant pool entry to get its address.
  const Value *OpVal = OpInfo.CallOperandVal;
  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
      isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
    OpInfo.CallOperand = DAG.getConstantPool(
        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
    return Chain;
  }

  // Otherwise, create a stack slot and emit a store to it before the asm.
  Type *Ty = OpVal->getType();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  unsigned Align = DL.getPrefTypeAlignment(Ty);
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
                            MachinePointerInfo::getFixedStack(MF, SSFI),
                            TLI.getMemValueType(DL, Ty));
  OpInfo.CallOperand = StackSlot;

  return Chain;
}
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process. However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation. This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand
///   RefOpInfo describes the matching operand if any, the operand otherwise
static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
                                 SDISelAsmOperandInfo &OpInfo,
                                 SDISelAsmOperandInfo &RefOpInfo) {
  LLVMContext &Context = *DAG.getContext();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<unsigned, 4> Regs;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  unsigned AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // Get the actual register value type. This is important, because the user
  // may have asked for (e.g.) the AX register in i32 type. We need to
  // remember that AX is actually i16 to get the right extension.
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);

  if (OpInfo.ConstraintVT != MVT::Other) {
    // If this is an FP operand in an integer register (or vice versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains. If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types). Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this is associated with a specific register, initialize the iterator
  // to the correct place. If virtual, make sure we have enough registers.

  // Initialize iterator if necessary
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Do not check for single registers.
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be member of RC");
  }

  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
}
static unsigned
findMatchingInlineAsmOperand(unsigned OperandNo,
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag =
        cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
    assert((InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
            InlineAsm::isMemKind(OpFlag)) &&
           "Skipped past definitions?");
    CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
  }
  return CurOp;
}
namespace {

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(ImmutableCallSite CS) {
    const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CS.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't
    // easily reason about it. Therefore, be conservative and set MayLoad/
    // MayStore for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // end anonymous namespace
/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  /// ConstraintOperands - Information about all of the constraints.
  SDISelAsmOperandInfoVector ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);

  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(CS);

  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0;   // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));

      // Process the call argument. BasicBlocks are labels, currently appearing
      // only in asm's.
      const Instruction *I = CS.getInstruction();
      if (isa<CallBrInst>(I) &&
          (ArgNo - 1) >= (cast<CallBrInst>(I)->getNumArgOperands() -
                          cast<CallBrInst>(I)->getNumIndirectDests())) {
        const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
        EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
        OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
      } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
      } else {
        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
      }

      OpInfo.ConstraintVT =
          OpInfo
              .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
              .getSimpleVT();
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      // The return value of the call is this value. As such, there is no
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT = TLI.getSimpleValueType(
            DAG.getDataLayout(), STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (!HasSideEffect)
      HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(T, SDValue());

    if (T.ConstraintType == TargetLowering::C_Immediate &&
        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could cause an integer constant to show up.
      return emitInlineAsmError(
          CS, "constraint '" + Twine(T.ConstraintCode) + "' expects an "
              "integer constant expression");

    ExtraInfo.update(T);
  }
  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();

  bool IsCallBr = isa<CallBrInst>(CS.getInstruction());
  if (IsCallBr) {
    // If this is a callbr we need to flush pending exports since inlineasm_br
    // is a terminator. We need to do this before nodes are glued to
    // the inlineasm_br node.
    Chain = getControlRoot();
  }

  // Second pass over the constraints: compute which constraint option to use.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.Type == InlineAsm::isClobber)
      continue;

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }
  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
      IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));

  // If we have a !srcloc metadata node associated with it, we want to attach
  // this to the ultimately generated inline asm machineinstr. To do this, we
  // pass in the third operand as this (potentially null) inline asm MDNode.
  const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
  // bits as operand 3.
  AsmNodeOperands.push_back(DAG.getTargetConstant(
      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // Assign Registers.
    SDISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;
    GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass, and a target-defined fashion for
        // C_Immediate/C_Other). Find a register that we can use.
        if (OpInfo.AssignedRegs.Regs.empty()) {
          emitInlineAsmError(
              CS, "couldn't allocate output register for constraint '" +
                  Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know that this register is
        // set.
        OpInfo.AssignedRegs.AddInlineAsmOperands(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
      }
      break;
    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {
        // If this is required to match an output register we have already set,
        // just use its register.
        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
                                                  AsmNodeOperands);
        unsigned OpFlag =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if (InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            emitInlineAsmError(CS, "inline asm not supported yet:"
                                   " don't know how to handle tied "
                                   "indirect register inputs");
            return;
          }

          MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
          SmallVector<unsigned, 4> Regs;

          if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
            unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
            MachineRegisterInfo &RegInfo =
                DAG.getMachineFunction().getRegInfo();
            for (unsigned i = 0; i != NumRegs; ++i)
              Regs.push_back(RegInfo.createVirtualRegister(RC));
          } else {
            emitInlineAsmError(CS, "inline asm error: This value type register "
                                   "class is not natively supported!");
            return;
          }

          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());

          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the registers tied to the matched output.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
                                    CS.getInstruction());
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
                                           true, OpInfo.getMatchedOperand(), dl,
                                           DAG, AsmNodeOperands);
          break;
        }

        assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
        assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
        OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
                                                     OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
        break;
      }
      // Treat indirect 'X' constraint as memory.
      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                         Ops, DAG);
        if (Ops.empty()) {
          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
            if (isa<ConstantSDNode>(InOperandVal)) {
              emitInlineAsmError(CS, "value out of range for constraint '" +
                                     Twine(OpInfo.ConstraintCode) + "'");
              return;
            }

          emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
                                 Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() ==
                   TLI.getPointerTy(DAG.getDataLayout()) &&
               "Memory operands expect pointer values");

        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }
      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      // TODO: Support this.
      if (OpInfo.isIndirect) {
        emitInlineAsmError(
            CS, "Don't know how to handle indirect register inputs yet "
                "for constraint '" +
                Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
                               Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      SDLoc dl = getCurSDLoc();

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
                                        Chain, &Flag, CS.getInstruction());

      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
                                               dl, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber:
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                 false, 0, getCurSDLoc(), DAG,
                                                 AsmNodeOperands);
      break;
    }
  }

  // Finish up input operands. Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  Flag = Chain.getValue(1);
  // Do additional work to generate outputs.

  SmallVector<EVT, 1> ResultVTs;
  SmallVector<SDValue, 1> ResultValues;
  SmallVector<SDValue, 8> OutChains;

  llvm::Type *CSResultType = CS.getType();
  ArrayRef<Type *> ResultTypes;
  if (StructType *StructResult = dyn_cast<StructType>(CSResultType))
    ResultTypes = StructResult->elements();
  else if (!CSResultType->isVoidTy())
    ResultTypes = makeArrayRef(CSResultType);

  auto CurResultType = ResultTypes.begin();
  auto handleRegAssign = [&](SDValue V) {
    assert(CurResultType != ResultTypes.end() && "Unexpected value");
    assert((*CurResultType)->isSized() && "Unexpected unsized type");
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
    ++CurResultType;
    // If the type of the inline asm call site return value is different but
    // has the same size as the type of the asm output, bitcast it. One example
    // of this is for vectors with different width / number of elements. This
    // can happen for register classes that can contain multiple different
    // value types. The preg or vreg allocated may not have the same VT as was
    // expected.
    //
    // This can also happen for a return value that disagrees with the register
    // class it is put in, eg. a double in a general-purpose register on a
    // 32-bit machine.
    if (ResultVT != V.getValueType() &&
        ResultVT.getSizeInBits() == V.getValueSizeInBits())
      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
             V.getValueType().isInteger()) {
      // If a result value was tied to an input value, the computed result
      // may have a wider width than the expected result. Extract the
      // relevant portion.
      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
    }
    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
    ResultVTs.push_back(ResultVT);
    ResultValues.push_back(V);
  };
  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(
            DAG, FuncInfo, getCurSDLoc(), Chain, &Flag, CS.getInstruction());
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect outputs manifest as stores. Record output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // generate CopyFromRegs to associated registers.
        assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(CS.getInstruction(), V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  // Only update Root if inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
    DAG.setRoot(Chain);
}
void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.emitError(CS.getInstruction(), Message);

  // Make sure we leave the DAG in a valid state
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
    Ops.push_back(DAG.getUNDEF(ValueVTs[i]));

  setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
}
void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlignment(I.getType()));
  DAG.setRoot(V.getValue(1));

  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}
SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                    const Instruction &I,
                                                    SDValue Op) {
  const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
  if (!Range)
    return Op;

  ConstantRange CR = getConstantRangeFromMetadata(*Range);
  if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
    return Op;

  APInt Lo = CR.getUnsignedMin();
  if (!Lo.isMinValue())
    return Op;

  APInt Hi = CR.getUnsignedMax();
  unsigned Bits = std::max(Hi.getActiveBits(),
                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));

  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);

  SDLoc SL = getCurSDLoc();

  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
                             DAG.getValueType(SmallVT));
  unsigned NumVals = Op.getNode()->getNumValues();
  if (NumVals == 1)
    return ZExt;

  SmallVector<SDValue, 4> Ops;

  Ops.push_back(ZExt);
  for (unsigned I = 1; I != NumVals; ++I)
    Ops.push_back(Op.getValue(I));

  return DAG.getMergeValues(Ops, SL);
}
/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
void SelectionDAGBuilder::populateCallLoweringInfo(
    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
    bool IsPatchPoint) {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
       ArgI != ArgE; ++ArgI) {
    const Value *V = Call->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    TargetLowering::ArgListEntry Entry;
    Entry.Node = getValue(V);
    Entry.Ty = V->getType();
    Entry.setAttributes(Call, ArgI);
    Args.push_back(Entry);
  }

  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
      .setDiscardResult(Call->use_empty())
      .setIsPatchPoint(IsPatchPoint);
}
/// Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
/// address materialization and register allocation, but may also be required
/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
                                SelectionDAGBuilder &Builder) {
  for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
    SDValue OpVal = Builder.getValue(CS.getArgument(i));
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
      Ops.push_back(
          Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
      Ops.push_back(
          Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
    } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
      const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
      Ops.push_back(Builder.DAG.getTargetFrameIndex(
          FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
    } else
      Ops.push_back(OpVal);
  }
}
/// Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])

  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InFlag, Callee, NullPtr;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledValue());
  NullPtr = DAG.getIntPtrConstant(0, DL, true);

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InFlag = Chain.getValue(1);

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
  SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
      MVT::i32));

  // Push live variables for the stack map.
  addStackMapLiveVars(&CI, 2, DL, Ops, *this);

  // We are not pushing any register mask info here on the operands list,
  // because the stackmap doesn't clobber anything.

  // Push the chain and the glue flag.
  Ops.push_back(Chain);
  Ops.push_back(InFlag);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
  Chain = SDValue(SM, 0);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);

  // Stackmaps don't generate values, so nothing goes into the NodeMap.

  // Set the root to the target-lowered call chain.
  DAG.setRoot(Chain);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();
}
/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
                                          const BasicBlock *EHPadBB) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])

  CallingConv::ID CC = CS.getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !CS->getType()->isVoidTy();
  SDLoc dl = getCurSDLoc();
  SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto *ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto *SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>
  SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
  unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();

  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, cast<CallBase>(CS.getInstruction()),
                           NumMetaOpers, NumCallArgs, Callee, ReturnTy, true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  /// Get a call instruction from the call sequence chain. Tail calls are not
  /// allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
      MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on the
  // stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));

  // Add the calling convention
  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
      Ops.push_back(getValue(CS.getArgument(i)));

  // Push the arguments from the call instruction up to the register mask.
  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
  Ops.append(Call->op_begin() + 2, e);

  // Push live variables for the stack map.
  addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);

  // Push the register mask info.
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-2));
  else
    Ops.push_back(*(Call->op_end()-1));

  // Push the chain (this is originally the first operand of the call, but
  // becomes now the last or second to last operand).
  Ops.push_back(*(Call->op_begin()));

  // Push the glue flag (last operand).
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-1));

  SDVTList NodeTys;
  if (IsAnyRegCC && HasDef) {
    // Create the return types based on the intrinsic definition
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SmallVector<EVT, 3> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
    assert(ValueVTs.size() == 1 && "Expected only one return value type.");

    // There is always a chain and a glue type at the end
    ValueVTs.push_back(MVT::Other);
    ValueVTs.push_back(MVT::Glue);
    NodeTys = DAG.getVTList(ValueVTs);
  } else
    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Replace the target specific call node with a PATCHPOINT node.
  MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
                                         dl, NodeTys, Ops);

  // Update the NodeMap.
  if (HasDef) {
    if (IsAnyRegCC)
      setValue(CS.getInstruction(), SDValue(MN, 0));
    else
      setValue(CS.getInstruction(), Result.first);
  }

  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
  // call sequence. Furthermore the location of the chain and glue can change
  // when the AnyReg calling convention is used and the intrinsic returns a
  // value.
  if (IsAnyRegCC && HasDef) {
    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
    SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  } else
    DAG.ReplaceAllUsesWith(Call, MN);
  DAG.DeleteNode(Call);

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
}
void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
                                            unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op1 = getValue(I.getArgOperand(0));
  SDValue Op2;
  if (I.getNumArgOperands() > 1)
    Op2 = getValue(I.getArgOperand(1));
  SDLoc dl = getCurSDLoc();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  SDValue Res;
  FastMathFlags FMF;
  if (isa<FPMathOperator>(I))
    FMF = I.getFastMathFlags();

  switch (Intrinsic) {
  case Intrinsic::experimental_vector_reduce_v2_fadd:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_v2_fmul:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_add:
    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_mul:
    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_and:
    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_or:
    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_xor:
    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smax:
    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smin:
    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umax:
    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umin:
    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmax:
    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmin:
    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
    break;
  default:
    llvm_unreachable("Unhandled vector reduce intrinsic");
  }
  setValue(&I, Res);
}
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
  // Handle the incoming return values from the call.
  CLI.Ins.clear();
  Type *OrigRetTy = CLI.RetTy;
  SmallVector<EVT, 4> RetTys;
  SmallVector<uint64_t, 4> Offsets;
  auto &DL = CLI.DAG.getDataLayout();
  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);

  if (CLI.IsPostTypeLegalization) {
    // If we are lowering a libcall after legalization, split the return type.
    SmallVector<EVT, 4> OldRetTys;
    SmallVector<uint64_t, 4> OldOffsets;
    RetTys.swap(OldRetTys);
    Offsets.swap(OldOffsets);

    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
      EVT RetVT = OldRetTys[i];
      uint64_t Offset = OldOffsets[i];
      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
      RetTys.append(NumRegs, RegisterVT);
      for (unsigned j = 0; j != NumRegs; ++j)
        Offsets.push_back(Offset + j * RegisterVTByteSZ);
    }
  }

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

  bool CanLowerReturn =
      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
                           CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  SDValue DemoteStackSlot;
  int DemoteStackIdx = -100;
  if (!CanLowerReturn) {
    // FIXME: equivalent assert?
    // assert(!CS.hasInAllocaArgument() &&
    //        "sret demotion is incompatible with inalloca");
    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
    unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
    Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
                                              DL.getAllocaAddrSpace());

    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
    ArgListEntry Entry;
    Entry.Node = DemoteStackSlot;
    Entry.Ty = StackSlotPtrType;
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsInReg = false;
    Entry.IsSRet = true;
    Entry.IsNest = false;
    Entry.IsByVal = false;
    Entry.IsReturned = false;
    Entry.IsSwiftSelf = false;
    Entry.IsSwiftError = false;
    Entry.IsCFGuardTarget = false;
    Entry.Alignment = Align;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());

    // sret demotion isn't compatible with tail-calls, since the sret argument
    // points into the callers stack frame.
    CLI.IsTailCall = false;
  } else {
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      ISD::ArgFlagsTy Flags;
      if (NeedsRegBlock) {
        Flags.setInConsecutiveRegs();
        if (I == RetTys.size() - 1)
          Flags.setInConsecutiveRegsLast();
      }
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags;
        MyFlags.Flags = Flags;
        MyFlags.VT = RegisterVT;
        MyFlags.ArgVT = VT;
        MyFlags.Used = CLI.IsReturnValueUsed;
        if (CLI.RetTy->isPointerTy()) {
          MyFlags.Flags.setPointer();
          MyFlags.Flags.setPointerAddrSpace(
              cast<PointerType>(CLI.RetTy)->getAddressSpace());
        }
        if (CLI.RetSExt)
          MyFlags.Flags.setSExt();
        if (CLI.RetZExt)
          MyFlags.Flags.setZExt();
        if (CLI.IsInReg)
          MyFlags.Flags.setInReg();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // We push in swifterror return as the last element of CLI.Ins.
  ArgListTy &Args = CLI.getArgs();
  if (supportSwiftError()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      if (Args[i].IsSwiftError) {
        ISD::InputArg MyFlags;
        MyFlags.VT = getPointerTy(DL);
        MyFlags.ArgVT = EVT(getPointerTy(DL));
        MyFlags.Flags.setSwiftError();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }
  // Handle all of the outgoing arguments.
  CLI.Outs.clear();
  CLI.OutVals.clear();
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
    // FIXME: Split arguments if CLI.IsPostTypeLegalization
    Type *FinalType = Args[i].Ty;
    if (Args[i].IsByVal)
      FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);
    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
         ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));

      if (Args[i].Ty->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Args[i].Ty)->getAddressSpace());
      }
      if (Args[i].IsZExt)
        Flags.setZExt();
      if (Args[i].IsSExt)
        Flags.setSExt();
      if (Args[i].IsInReg) {
        // If we are using the vectorcall calling convention, a structure that
        // is passed InReg is surely an HVA.
        if (CLI.CallConv == CallingConv::X86_VectorCall &&
            isa<StructType>(FinalType)) {
          // The first value of a structure is marked
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set InReg Flag
        Flags.setInReg();
      }
      if (Args[i].IsSRet)
        Flags.setSRet();
      if (Args[i].IsSwiftSelf)
        Flags.setSwiftSelf();
      if (Args[i].IsSwiftError)
        Flags.setSwiftError();
      if (Args[i].IsCFGuardTarget)
        Flags.setCFGuardTarget();
      if (Args[i].IsByVal)
        Flags.setByVal();
      if (Args[i].IsInAlloca) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Args[i].IsByVal || Args[i].IsInAlloca) {
        PointerType *Ty = cast<PointerType>(Args[i].Ty);
        Type *ElementTy = Ty->getElementType();

        unsigned FrameSize = DL.getTypeAllocSize(
            Args[i].ByValType ? Args[i].ByValType : ElementTy);
        Flags.setByValSize(FrameSize);

        // For ByVal, alignment should come from FE. BE will guess if this
        // info is not there but there are cases it cannot get right.
        unsigned FrameAlign;
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        else
          FrameAlign = getByValTypeAlignment(ElementTy, DL);
        Flags.setByValAlign(Align(FrameAlign));
      }
      if (Args[i].IsNest)
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                 CLI.CallConv, VT);
      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                        CLI.CallConv, VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].IsSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].IsZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      // Conservatively only handle 'returned' on non-vectors that can be
      // lowered, for now.
      if (Args[i].IsReturned && !Op.getValueType().isVector() &&
          CanLowerReturn) {
        assert((CLI.RetTy == Args[i].Ty ||
                (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
                 CLI.RetTy->getPointerAddressSpace() ==
                     Args[i].Ty->getPointerAddressSpace())) &&
               RetTys.size() == NumValues && "unexpected use of 'returned'");
        // Before passing 'returned' to the target lowering code, ensure that
        // either the register MVT and the actual EVT are the same size or that
        // the return value and argument are extended in the same way; in these
        // cases it's safe to pass the argument register value unchanged as the
        // return register value (although it's at the target's option whether
        // to do so)
        // TODO: allow code generation to take advantage of partially preserved
        // registers rather than clobbering the entire register when the
        // parameter extension method is not compatible with the return
        // extension method
        if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
            (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
             CLI.RetZExt == Args[i].IsZExt))
          Flags.setReturned();
      }

      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
                     CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);

      for (unsigned j = 0; j != NumParts; ++j) {
        // if it isn't first piece, alignment must be 1
        // For scalable vectors the scalable part is currently handled
        // by individual targets, so we just use the known minimum size here.
        ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
                               i < CLI.NumFixedArgs, i,
                               j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
        if (NumParts > 1 && j == 0)
          MyFlags.Flags.setSplit();
        else if (j != 0)
          MyFlags.Flags.setOrigAlign(Align::None());
        if (j == NumParts - 1)
          MyFlags.Flags.setSplitEnd();

        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
      }

      if (NeedsRegBlock && Value == NumValues - 1)
        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
    }
  }
  SmallVector<SDValue, 4> InVals;
  CLI.Chain = LowerCall(CLI, InVals);

  // Update CLI.InVals to use outside of this function.
  CLI.InVals = InVals;

  // Verify that the target's LowerCall behaved as expected.
  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
         "LowerCall didn't return a valid chain!");
  assert((!CLI.IsTailCall || InVals.empty()) &&
         "LowerCall emitted a return value for a tail call!");
  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
         "LowerCall didn't emit the correct number of values!");

  // For a tail call, the return value is merely live-out and there aren't
  // any nodes in the DAG representing it. Return a special value to
  // indicate that a tail call has been emitted and no more Instructions
  // should be processed in the current block.
  if (CLI.IsTailCall) {
    CLI.DAG.setRoot(CLI.Chain);
    return std::make_pair(SDValue(), SDValue());
  }

#ifndef NDEBUG
  for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
    assert(InVals[i].getNode() && "LowerCall emitted a null value!");
    assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
           "LowerCall emitted a value with the wrong type!");
  }
#endif
  SmallVector<SDValue, 4> ReturnValues;
  if (!CanLowerReturn) {
    // The instruction result is the result of loading from the
    // hidden sret parameter.
    SmallVector<EVT, 1> PVTs;
    Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());

    ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
    assert(PVTs.size() == 1 && "Pointers should fit in one register");
    EVT PtrVT = PVTs[0];

    unsigned NumValues = RetTys.size();
    ReturnValues.resize(NumValues);
    SmallVector<SDValue, 4> Chains(NumValues);

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    for (unsigned i = 0; i < NumValues; ++i) {
      SDValue Add =
          CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
                          CLI.DAG.getConstant(Offsets[i], CLI.DL, PtrVT),
                          Flags);
      SDValue L = CLI.DAG.getLoad(
          RetTys[i], CLI.DL, CLI.Chain, Add,
          MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
                                            DemoteStackIdx, Offsets[i]),
          /* Alignment = */ 1);
      ReturnValues[i] = L;
      Chains[i] = L.getValue(1);
    }

    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
  } else {
    // Collect the legal value parts into potentially illegal values
    // that correspond to the original function's return values.
    Optional<ISD::NodeType> AssertOp;
    if (CLI.RetSExt)
      AssertOp = ISD::AssertSext;
    else if (CLI.RetZExt)
      AssertOp = ISD::AssertZext;
    unsigned CurReg = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);

      ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                              NumRegs, RegisterVT, VT, nullptr,
                                              CLI.CallConv, AssertOp));
      CurReg += NumRegs;
    }
  }
  // For a function returning void, there is no return value. We can't create
  // such a node, so we just return a null return value in that case. In
  // that case, nothing will actually look at the value.
  if (ReturnValues.empty())
    return std::make_pair(SDValue(), CLI.Chain);

  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
                                CLI.DAG.getVTList(RetTys), ReturnValues);
  return std::make_pair(Res, CLI.Chain);
}

void TargetLowering::LowerOperationWrapper(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
    Results.push_back(Res);
}

SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  llvm_unreachable("LowerOperation not implemented for this target!");
}

void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
  SDValue Op = getNonRegisterValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // If this is an InlineAsm we have to match the registers required, not the
  // notional registers required by the type.

  RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                   V->getType(), None); // This is not an ABI copy.
  SDValue Chain = DAG.getEntryNode();

  ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
                              FuncInfo.PreferredExtendType.end())
                                 ? ISD::ANY_EXTEND
                                 : FuncInfo.PreferredExtendType[V];
  RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
  PendingExports.push_back(Chain);
}

#include "llvm/CodeGen/SelectionDAGISel.h"

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  if (FastISel)
    return A->use_empty();

  const BasicBlock &Entry = A->getParent()->front();
  for (const User *U : A->users())
    if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
      return false; // Use not in entry block.

  return true;
}

using ArgCopyElisionMapTy =
    DenseMap<const Argument *,
             std::pair<const AllocaInst *, const StoreInst *>>;

/// Scan the entry block of the function in FuncInfo for arguments that look
/// like copies into a local alloca. Record any copied arguments in
/// ArgCopyElisionCandidates.
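///
/// A typical candidate, as clang emits at -O0 (illustrative IR):
///
///   define void @f(i32 %x) {
///   entry:
///     %x.addr = alloca i32, align 4
///     store i32 %x, i32* %x.addr, align 4
///     ...
///
/// If %x arrives in a stack slot, the store is redundant and %x.addr can
/// reuse the caller's memory directly.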
static void
findArgumentCopyElisionCandidates(const DataLayout &DL,
                                  FunctionLoweringInfo *FuncInfo,
                                  ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
  // Record the state of every static alloca used in the entry block. Argument
  // allocas are all used in the entry block, so we need approximately as many
  // entries as we have arguments.
  enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
  SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
  unsigned NumArgs = FuncInfo->Fn->arg_size();
  StaticAllocas.reserve(NumArgs * 2);

  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
    if (!V)
      return nullptr;
    V = V->stripPointerCasts();
    const auto *AI = dyn_cast<AllocaInst>(V);
    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
      return nullptr;
    auto Iter = StaticAllocas.insert({AI, Unknown});
    return &Iter.first->second;
  };

  // Look for stores of arguments to static allocas. Look through bitcasts and
  // GEPs to handle type coercions, as long as the alloca is fully initialized
  // by the store. Any non-store use of an alloca escapes it and any subsequent
  // unanalyzed store might write it.
  // FIXME: Handle structs initialized with multiple stores.
  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
    // Look for stores, and handle non-store uses conservatively.
    const auto *SI = dyn_cast<StoreInst>(&I);
    if (!SI) {
      // We will look through cast uses, so ignore them completely.
      if (I.isCast())
        continue;
      // Ignore debug info intrinsics, they don't escape or store to allocas.
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      // This is an unknown instruction. Assume it escapes or writes to all
      // static alloca operands.
      for (const Use &U : I.operands()) {
        if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
          *Info = StaticAllocaInfo::Clobbered;
      }
      continue;
    }

    // If the stored value is a static alloca, mark it as escaped.
    if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
      *Info = StaticAllocaInfo::Clobbered;

    // Check if the destination is a static alloca.
    const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
    StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
    if (!Info)
      continue;
    const AllocaInst *AI = cast<AllocaInst>(Dst);

    // Skip allocas that have been initialized or clobbered.
    if (*Info != StaticAllocaInfo::Unknown)
      continue;

    // Check if the stored value is an argument, and that this store fully
    // initializes the alloca. Don't elide copies from the same argument twice.
    const Value *Val = SI->getValueOperand()->stripPointerCasts();
    const auto *Arg = dyn_cast<Argument>(Val);
    if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
        Arg->getType()->isEmptyTy() ||
        DL.getTypeStoreSize(Arg->getType()) !=
            DL.getTypeAllocSize(AI->getAllocatedType()) ||
        ArgCopyElisionCandidates.count(Arg)) {
      *Info = StaticAllocaInfo::Clobbered;
      continue;
    }

    LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
                      << '\n');

    // Mark this alloca and store for argument copy elision.
    *Info = StaticAllocaInfo::Elidable;
    ArgCopyElisionCandidates.insert({Arg, {AI, SI}});

    // Stop scanning if we've seen all arguments. This will happen early in -O0
    // builds, which is useful, because -O0 builds have large entry blocks and
    // many allocas.
    if (ArgCopyElisionCandidates.size() == NumArgs)
      break;
  }
}

/// Try to elide argument copies from memory into a local alloca. Succeeds if
/// ArgVal is a load from a suitable fixed stack object.
static void tryToElideArgumentCopy(
    FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
    DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
    SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
    ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
    SDValue ArgVal, bool &ArgHasUses) {
  // Check if this is a load from a fixed stack object.
  auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
  if (!LNode)
    return;
  auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
  if (!FINode)
    return;

  // Check that the fixed stack object is the right size and alignment.
  // Look at the alignment that the user wrote on the alloca instead of looking
  // at the stack object.
  auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
  assert(ArgCopyIter != ArgCopyElisionCandidates.end());
  const AllocaInst *AI = ArgCopyIter->second.first;
  int FixedIndex = FINode->getIndex();
  int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
  int OldIndex = AllocaIndex;
  MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
  if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
    LLVM_DEBUG(
        dbgs() << "  argument copy elision failed due to bad fixed stack "
                  "object size\n");
    return;
  }
  unsigned RequiredAlignment = AI->getAlignment();
  if (!RequiredAlignment) {
    RequiredAlignment = FuncInfo.MF->getDataLayout().getABITypeAlignment(
        AI->getAllocatedType());
  }
  if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
    LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
                         "greater than stack argument alignment ("
                      << RequiredAlignment << " vs "
                      << MFI.getObjectAlignment(FixedIndex) << ")\n");
    return;
  }

  // Perform the elision. Delete the old stack object and replace its only use
  // in the variable info map. Mark the stack object as mutable.
  LLVM_DEBUG({
    dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
           << "  Replacing frame index " << OldIndex << " with " << FixedIndex
           << '\n';
  });
  MFI.RemoveStackObject(OldIndex);
  MFI.setIsImmutableObjectIndex(FixedIndex, false);
  AllocaIndex = FixedIndex;
  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
  Chains.push_back(ArgVal.getValue(1));

  // Avoid emitting code for the store implementing the copy.
  const StoreInst *SI = ArgCopyIter->second.second;
  ElidedArgCopyInstrs.insert(SI);

  // Check for uses of the argument again so that we can avoid exporting ArgVal
  // if it isn't used by anything other than the store.
  for (const Value *U : Arg.users()) {
    if (U != SI) {
      ArgHasUses = true;
      break;
    }
  }
}

void SelectionDAGISel::LowerArguments(const Function &F) {
  SelectionDAG &DAG = SDB->DAG;
  SDLoc dl = SDB->getCurSDLoc();
  const DataLayout &DL = DAG.getDataLayout();
  SmallVector<ISD::InputArg, 16> Ins;

  if (!FuncInfo->CanLowerReturn) {
    // Put in an sret pointer parameter before all the other parameters.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(),
                    F.getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    ValueVTs);

    // NOTE: Assuming that a pointer will never break down to more than one VT
    // or one register.
    ISD::ArgFlagsTy Flags;
    Flags.setSRet();
    MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
                         ISD::InputArg::NoArgIndex, 0);
    Ins.push_back(RetArg);
  }

  // Look for stores of arguments to static allocas. Mark such arguments with a
  // flag to ask the target to give us the memory location of that argument if
  // possible.
  ArgCopyElisionMapTy ArgCopyElisionCandidates;
  findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
                                    ArgCopyElisionCandidates);

  // Set up the incoming argument description vector.
  for (const Argument &Arg : F.args()) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
    bool isArgValueUsed = !Arg.use_empty();
    unsigned PartBase = 0;
    Type *FinalType = Arg.getType();
    if (Arg.hasAttribute(Attribute::ByVal))
      FinalType = Arg.getParamByValType();
    bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
        FinalType, F.getCallingConv(), F.isVarArg());
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      const Align OriginalAlignment(
          TLI->getABIAlignmentForCallingConv(ArgTy, DL));

      if (Arg.getType()->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Arg.getType())->getAddressSpace());
      }
      if (Arg.hasAttribute(Attribute::ZExt))
        Flags.setZExt();
      if (Arg.hasAttribute(Attribute::SExt))
        Flags.setSExt();
      if (Arg.hasAttribute(Attribute::InReg)) {
        // If we are using the vectorcall calling convention, a structure that
        // is passed InReg is surely an HVA.
        if (F.getCallingConv() == CallingConv::X86_VectorCall &&
            isa<StructType>(Arg.getType())) {
          // The first value of a structure is marked.
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set InReg flag.
        Flags.setInReg();
      }
      if (Arg.hasAttribute(Attribute::StructRet))
        Flags.setSRet();
      if (Arg.hasAttribute(Attribute::SwiftSelf))
        Flags.setSwiftSelf();
      if (Arg.hasAttribute(Attribute::SwiftError))
        Flags.setSwiftError();
      if (Arg.hasAttribute(Attribute::ByVal))
        Flags.setByVal();
      if (Arg.hasAttribute(Attribute::InAlloca)) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (F.getCallingConv() == CallingConv::X86_INTR) {
        // The x86 interrupt CC passes the frame (1st parameter) by value on
        // the stack.
        if (ArgNo == 0)
          Flags.setByVal();
      }
      if (Flags.isByVal() || Flags.isInAlloca()) {
        Type *ElementTy = Arg.getParamByValType();

        // For ByVal, size and alignment should be passed from the FE. The BE
        // will guess if this info is not there, but there are cases it cannot
        // get right.
        unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
        Flags.setByValSize(FrameSize);

        unsigned FrameAlign;
        if (Arg.getParamAlignment())
          FrameAlign = Arg.getParamAlignment();
        else
          FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
        Flags.setByValAlign(Align(FrameAlign));
      }
      if (Arg.hasAttribute(Attribute::Nest))
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      Flags.setOrigAlign(OriginalAlignment);
      if (ArgCopyElisionCandidates.count(&Arg))
        Flags.setCopyElisionCandidate();
      if (Arg.hasAttribute(Attribute::Returned))
        Flags.setReturned();

      MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
      unsigned NumRegs = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        // For scalable vectors, use the minimum size; individual targets
        // are responsible for handling scalable vector arguments and
        // return values.
        ISD::InputArg MyFlags(
            Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
            PartBase + i * RegisterVT.getStoreSize().getKnownMinSize());
        if (NumRegs > 1 && i == 0)
          MyFlags.Flags.setSplit();
        // if it isn't the first piece, alignment must be 1
        else if (i > 0) {
          MyFlags.Flags.setOrigAlign(Align::None());
          if (i == NumRegs - 1)
            MyFlags.Flags.setSplitEnd();
        }
        Ins.push_back(MyFlags);
      }
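      // Example (illustrative): an i64 argument on a 32-bit target is
      // usually passed as two i32 parts; part 0 is tagged with setSplit()
      // and the last part with setSplitEnd(), so targets can recognize the
      // group as one original argument.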
      if (NeedsRegBlock && Value == NumValues - 1)
        Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
      PartBase += VT.getStoreSize().getKnownMinSize();
    }
  }

  // Call the target to set up the argument values.
  SmallVector<SDValue, 8> InVals;
  SDValue NewRoot = TLI->LowerFormalArguments(
      DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);

  // Verify that the target's LowerFormalArguments behaved as expected.
  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
         "LowerFormalArguments didn't return a valid chain!");
  assert(InVals.size() == Ins.size() &&
         "LowerFormalArguments didn't emit the correct number of values!");

#ifndef NDEBUG
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    assert(InVals[i].getNode() &&
           "LowerFormalArguments emitted a null value!");
    assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
           "LowerFormalArguments emitted a value with the wrong type!");
  }
#endif

  // Update the DAG with the new chain value resulting from argument lowering.
  DAG.setRoot(NewRoot);

  // Set up the argument values.
  unsigned i = 0;
  if (!FuncInfo->CanLowerReturn) {
    // Create a virtual register for the sret pointer, and put in a copy
    // from the sret argument into it.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(),
                    F.getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    ValueVTs);
    MVT VT = ValueVTs[0].getSimpleVT();
    MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
    Optional<ISD::NodeType> AssertOp = None;
    SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
                                        nullptr, F.getCallingConv(), AssertOp);

    MachineFunction &MF = SDB->DAG.getMachineFunction();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    Register SRetReg =
        RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
    FuncInfo->DemoteRegister = SRetReg;
    NewRoot =
        SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
    DAG.setRoot(NewRoot);

    // i indexes lowered arguments. Bump it past the hidden sret argument.
    ++i;
  }

  SmallVector<SDValue, 4> Chains;
  DenseMap<int, int> ArgCopyElisionFrameIndexMap;
  for (const Argument &Arg : F.args()) {
    SmallVector<SDValue, 4> ArgValues;
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0)
      continue;

    bool ArgHasUses = !Arg.use_empty();

    // Elide the copying store if the target loaded this argument from a
    // suitable fixed stack object.
    if (Ins[i].Flags.isCopyElisionCandidate()) {
      tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
                             ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
                             InVals[i], ArgHasUses);
    }

    // If this argument is unused then remember its value. It is used to
    // generate debugging information.
    bool isSwiftErrorArg =
        TLI->supportSwiftError() &&
        Arg.hasAttribute(Attribute::SwiftError);
    if (!ArgHasUses && !isSwiftErrorArg) {
      SDB->setUnusedArgValue(&Arg, InVals[i]);

      // Also remember any frame index for use in FastISel.
      if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
        FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
    }

    for (unsigned Val = 0; Val != NumValues; ++Val) {
      EVT VT = ValueVTs[Val];
      MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
                                                      F.getCallingConv(), VT);
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);

      // Even an apparently unused swifterror argument needs to be returned, so
      // we generate a copy for it that can be used on return from the
      // function.
      if (ArgHasUses || isSwiftErrorArg) {
        Optional<ISD::NodeType> AssertOp;
        if (Arg.hasAttribute(Attribute::SExt))
          AssertOp = ISD::AssertSext;
        else if (Arg.hasAttribute(Attribute::ZExt))
          AssertOp = ISD::AssertZext;

        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
                                             PartVT, VT, nullptr,
                                             F.getCallingConv(), AssertOp));
      }

      i += NumParts;
    }

    // We don't need to do anything else for unused arguments.
    if (ArgValues.empty())
      continue;

    // Note down frame index.
    if (FrameIndexSDNode *FI =
            dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
      FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());

    SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
                                     SDB->getCurSDLoc());

    SDB->setValue(&Arg, Res);
    if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
      // We want to associate the argument with the frame index, among
      // involved operands, that corresponds to the lowest address. The
      // getCopyFromParts function, called earlier, is swapping the order of
      // the operands to BUILD_PAIR depending on endianness. The result of
      // that swapping is that the least significant bits of the argument will
      // be in the first operand of the BUILD_PAIR node, and the most
      // significant bits will be in the second operand.
      unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
      if (LoadSDNode *LNode =
              dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
        if (FrameIndexSDNode *FI =
                dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
          FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
    }
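    // Example (illustrative): an i64 argument reloaded as two i32 loads.
    // Operand 0 of the BUILD_PAIR always holds the low 32 bits. On a
    // little-endian target that half also lives at the lower address, so
    // LowAddressOp is 0; on a big-endian target the high half (operand 1)
    // sits at the lower address, so LowAddressOp is 1.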

    // Analyses past this point are naive and don't expect an assertion.
    if (Res.getOpcode() == ISD::AssertZext)
      Res = Res.getOperand(0);

    // Update the SwiftErrorVRegDefMap.
    if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg))
        SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
                                   Reg);
    }

    // If this argument is live outside of the entry block, insert a copy from
    // wherever we got it to the vreg that other BB's will reference it as.
    if (Res.getOpcode() == ISD::CopyFromReg) {
      // If we can, though, try to skip creating an unnecessary vreg.
      // FIXME: This isn't very clean... it would be nice to make this more
      // general.
      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg)) {
        FuncInfo->ValueMap[&Arg] = Reg;
        continue;
      }
    }
    if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
      FuncInfo->InitializeRegForValue(&Arg);
      SDB->CopyToExportRegsIfNeeded(&Arg);
    }
  }

  if (!Chains.empty()) {
    Chains.push_back(NewRoot);
    NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }

  DAG.setRoot(NewRoot);

  assert(i == InVals.size() && "Argument register count mismatch!");

  // If any argument copy elisions occurred and we have debug info, update the
  // stale frame indices used in the dbg.declare variable info table.
  MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo =
      MF->getVariableDbgInfo();
  if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
    for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
      auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
      if (I != ArgCopyElisionFrameIndexMap.end())
        VI.Slot = I->second;
    }
  }

  // Finally, if the target has anything special to do, allow it to do so.
  EmitFunctionEntryCode();
}

/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBB's for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
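///
/// For example (illustrative IR):
///
///   entry:
///     br label %succ
///   succ:
///     %p = phi i32 [ %v, %entry ]
///
/// %v must be made available in a virtual register so that the machine PHI
/// created for %p can name it as the incoming value from entry's MBB.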
void
SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;

  // Check PHI nodes in successors that expect a value to be available from
  // this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead phi's.
      if (PN.use_empty())
        continue;

      // Skip empty types.
      if (PN.getType()->isEmptyTy())
        continue;

      unsigned Reg;
      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
        unsigned &RegOut = ConstantsOut[C];
        if (RegOut == 0) {
          RegOut = FuncInfo.CreateRegs(C);
          CopyValueToVirtualRegister(C, RegOut);
        }
        Reg = RegOut;
      } else {
        DenseMap<const Value *, unsigned>::iterator I =
            FuncInfo.ValueMap.find(PHIOp);
        if (I != FuncInfo.ValueMap.end())
          Reg = I->second;
        else {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo.CreateRegs(PHIOp);
          CopyValueToVirtualRegister(PHIOp, Reg);
        }
      }

      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
      SmallVector<EVT, 4> ValueVTs;
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          FuncInfo.PHINodesToUpdate.push_back(
              std::make_pair(&*MBBI++, Reg + i));
        Reg += NumRegisters;
      }
    }
  }

  ConstantsOut.clear();
}

/// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if
/// SuccMBB is null.
MachineBasicBlock *
SelectionDAGBuilder::StackProtectorDescriptor::
AddSuccessorMBB(const BasicBlock *BB,
                MachineBasicBlock *ParentMBB,
                bool IsLikely,
                MachineBasicBlock *SuccMBB) {
  // If SuccBB has not been created yet, create it.
  if (!SuccMBB) {
    MachineFunction *MF = ParentMBB->getParent();
    MachineFunction::iterator BBI(ParentMBB);
    SuccMBB = MF->CreateMachineBasicBlock(BB);
    MF->insert(++BBI, SuccMBB);
  }
  // Add it as a successor of ParentMBB.
  ParentMBB->addSuccessor(
      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  return SuccMBB;
}

MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
  MachineFunction::iterator I(MBB);
  if (++I == FuncInfo.MF->end())
    return nullptr;
  return &*I;
}

/// During lowering new call nodes can be created (such as memset, etc.).
/// Those will become new roots of the current DAG, but complications arise
/// when they are tail calls. In such cases, the call lowering will update
/// the root, but the builder still needs to know that a tail call has been
/// lowered in order to avoid generating an additional return.
void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
  // If the node is null, we do have a tail call.
  if (MaybeTC.getNode() != nullptr)
    DAG.setRoot(MaybeTC);
  else
    HasTailCall = true;
}

void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                                        MachineBasicBlock *SwitchMBB,
                                        MachineBasicBlock *DefaultMBB) {
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  unsigned Size = W.LastCluster - W.FirstCluster + 1;

  BranchProbabilityInfo *BPI = FuncInfo.BPI;

  if (Size == 2 && W.MBB == SwitchMBB) {
    // If any two of the cases have the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once. For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
    // TODO: This could be extended to merge any 2 cases in switches with 3
    // cases.
    // TODO: Handle cases where W.CaseBB != SwitchBB.
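    // Why this works (illustrative): 6 ^ 4 == 2, a power of two, so the two
    // values differ in exactly one bit; OR-ing that bit into X maps 4 and 6
    // (and nothing else) to 6, letting one equality test cover both cases.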
    CaseCluster &Small = *W.FirstCluster;
    CaseCluster &Big = *W.LastCluster;

    if (Small.Low == Small.High && Big.Low == Big.High &&
        Small.MBB == Big.MBB) {
      const APInt &SmallValue = Small.Low->getValue();
      const APInt &BigValue = Big.Low->getValue();

      // Check that there is only one bit different.
      APInt CommonBit = BigValue ^ SmallValue;
      if (CommonBit.isPowerOf2()) {
        SDValue CondLHS = getValue(Cond);
        EVT VT = CondLHS.getValueType();
        SDLoc DL = getCurSDLoc();

        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
                                 DAG.getConstant(CommonBit, DL, VT));
        SDValue Cond = DAG.getSetCC(
            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
            ISD::SETEQ);

        // Update successor info.
        // Both Small and Big will jump to Small.BB, so we sum up the
        // probabilities.
        addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
        if (BPI)
          addSuccessorWithProb(
              SwitchMBB, DefaultMBB,
              // The default destination is the first successor in IR.
              BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
        else
          addSuccessorWithProb(SwitchMBB, DefaultMBB);

        // Insert the true branch.
        SDValue BrCond =
            DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
                        DAG.getBasicBlock(Small.MBB));
        // Insert the false branch.
        BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
                             DAG.getBasicBlock(DefaultMBB));

        DAG.setRoot(BrCond);
        return;
      }
    }
  }

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
      // Put Cond in a virtual register to make it available from the new
      // blocks.
      ExportFromCurrentBlock(Cond);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
      case CC_JumpTable: {
        // FIXME: Optimize away range check based on pivot comparisons.
        JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
        SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;

        // The jump block hasn't been inserted yet; insert it here.
        MachineBasicBlock *JumpMBB = JT->MBB;
        CurMF->insert(BBI, JumpMBB);

        auto JumpProb = I->Prob;
        auto FallthroughProb = UnhandledProbs;

        // If the default statement is a target of the jump table, we evenly
        // distribute the default probability to successors of CurMBB. Also
        // update the probability on the edge from JumpMBB to Fallthrough.
        for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                              SE = JumpMBB->succ_end();
             SI != SE; ++SI) {
          if (*SI == DefaultMBB) {
            JumpProb += DefaultProb / 2;
            FallthroughProb -= DefaultProb / 2;
            JumpMBB->setSuccProbability(SI, DefaultProb / 2);
            JumpMBB->normalizeSuccProbs();
            break;
          }
        }

        if (FallthroughUnreachable) {
          // Skip the range check if the fallthrough block is unreachable.
          JTH->OmitRangeCheck = true;
        }

        if (!JTH->OmitRangeCheck)
          addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
        addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
        CurMBB->normalizeSuccProbs();

        // The jump table header will be inserted in our current block, do the
        // range check, and fall through to our fallthrough block.
        JTH->HeaderBB = CurMBB;
        JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

        // If we're in the right place, emit the jump table header right now.
        if (CurMBB == SwitchMBB) {
          visitJumpTableHeader(*JT, *JTH, SwitchMBB);
          JTH->Emitted = true;
        }
        break;
      }
      case CC_BitTests: {
        // FIXME: Optimize away range check based on pivot comparisons.
        BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];

        // The bit test blocks haven't been inserted yet; insert them here.
        for (BitTestCase &BTC : BTB->Cases)
          CurMF->insert(BBI, BTC.ThisBB);

        // Fill in fields of the BitTestBlock.
        BTB->Parent = CurMBB;
        BTB->Default = Fallthrough;

        BTB->DefaultProb = UnhandledProbs;
        // If the cases in bit test don't form a contiguous range, we evenly
        // distribute the probability on the edge to Fallthrough to two
        // successors of CurMBB.
        if (!BTB->ContiguousRange) {
          BTB->Prob += DefaultProb / 2;
          BTB->DefaultProb -= DefaultProb / 2;
        }

        if (FallthroughUnreachable) {
          // Skip the range check if the fallthrough block is unreachable.
          BTB->OmitRangeCheck = true;
        }

        // If we're in the right place, emit the bit test header right now.
        if (CurMBB == SwitchMBB) {
          visitBitTestHeader(*BTB, SwitchMBB);
          BTB->Emitted = true;
        }
        break;
      }
      case CC_Range: {
        const Value *RHS, *LHS, *MHS;
        ISD::CondCode CC;
        if (I->Low == I->High) {
          // Check Cond == I->Low.
          CC = ISD::SETEQ;
          LHS = Cond;
          RHS = I->Low;
          MHS = nullptr;
        } else {
          // Check I->Low <= Cond <= I->High.
          CC = ISD::SETLE;
          LHS = I->Low;
          MHS = Cond;
          RHS = I->High;
        }

        // If Fallthrough is unreachable, fold away the comparison.
        if (FallthroughUnreachable)
          CC = ISD::SETTRUE;

        // The false probability is the sum of all unhandled cases.
        CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
                     getCurSDLoc(), I->Prob, UnhandledProbs);

        if (CurMBB == SwitchMBB)
          visitSwitchCase(CB, SwitchMBB);
        else
          SL->SwitchCases.push_back(CB);
        break;
      }
    }
    CurMBB = Fallthrough;
  }
}

unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
                                              CaseClusterIt First,
                                              CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}

void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
                                        const SwitchWorkListItem &W,
                                        Value *Cond,
                                        MachineBasicBlock *SwitchMBB) {
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  // Balance the tree based on branch probabilities to create a near-optimal
  // (in terms of search time given key frequency) binary search tree. See
  // e.g. Kurt Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions
  // to find a partitioning of the clusters which balances the probability on
  // both sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }
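  // Worked example (illustrative, DefaultProb == 0): given cluster
  // probabilities {0.1, 0.4, 0.2, 0.3}, the left side first absorbs the 0.4
  // cluster (LeftProb = 0.5), then the right side absorbs the 0.2 cluster
  // (RightProb = 0.5), yielding the balanced split {0.1, 0.4} | {0.2, 0.3}.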

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has less than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
      W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
               getCurSDLoc(), LeftProb, RightProb);

  if (W.MBB == SwitchMBB)
    visitSwitchCase(CB, SwitchMBB);
  else
    SL->SwitchCases.push_back(CB);
}

// Scale CaseProb after peeling a case with the probability of PeeledCaseProb
// from the switch statement.
static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
                                            BranchProbability PeeledCaseProb) {
  if (PeeledCaseProb == BranchProbability::getOne())
    return BranchProbability::getZero();
  BranchProbability SwitchProb = PeeledCaseProb.getCompl();

  uint32_t Numerator = CaseProb.getNumerator();
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
}
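
// Worked example for scaleCaseProbality (illustrative): peeling a case with
// probability 4/5 leaves SwitchProb = 1/5; a remaining case that had
// probability 1/10 of the original switch is rescaled to
// (1/10) / (1/5) = 1/2 of the peeled switch.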

// Try to peel the top probability case if it exceeds the threshold.
// Return the current MachineBasicBlock for the switch statement if the
// peeling does not occur.
// If the peeling is performed, return the newly created MachineBasicBlock
// for the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't perform this transform if there is only one cluster or when
  // optimizing for size.
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().hasMinSize())
    return SwitchMBB;
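  // Note: SwitchPeelThreshold is a percentage; with its default setting a
  // case must carry roughly two thirds of the switch's probability mass
  // before it is peeled into its own up-front comparison.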

  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr, nullptr, TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);
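  // Example (illustrative): cases 0, 1, 2 -> %bb1 and 3, 4 -> %bb2 become
  // two range clusters, [0, 2] -> %bb1 and [3, 4] -> %bb2.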

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if the peel occurs and
  // DefaultMBB is not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      // For optimized builds, lower large range as a balanced binary tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}

void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
  SDValue N = getValue(I.getOperand(0));