[x86] fix assert with horizontal math + broadcast of vector (PR43402)
lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (llvm-core.git, blob 02805826ef0853afe2bd1454fdd8411d6c28e4b0)
//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
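///
/// For example (illustrative, not from the original source): in
///
///   entry:
///     %v = add i32 %a, 1
///     br i1 %cond, label %then, label %exit
///   then:
///     br label %exit
///   exit:
///     %p = phi i32 [ %v, %entry ], [ 0, %then ]
///
/// %v is used by a PHI node, so it must be exported through a virtual
/// register instead of staying a block-local SDValue.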
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Value *V) {
  // Look at the users of the source value: if it feeds compare instructions
  // and the number of signed predicates among them is greater than the number
  // of unsigned predicates, we prefer to use SIGN_EXTEND.
  //
  // With this optimization, we would be able to reduce some redundant sign or
  // zero extension instructions, and eventually expose more machine CSE
  // opportunities.
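  //
  // For example (illustrative, not from the original source): given
  //   %c1 = icmp slt i32 %x, 10   ; signed predicate
  //   %c2 = icmp sgt i32 %x, 0    ; signed predicate
  //   %c3 = icmp ult i32 %x, 100  ; unsigned predicate
  // NumOfSigned ends up as 2 and NumOfUnsigned as 1, so the preferred extend
  // kind for %x is ISD::SIGN_EXTEND.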
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : V->users()) {
    if (const auto *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  DA = DAG->getDivergenceAnalysis();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects.insert({AI, {}}).first->second.push_back(
              &H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }
  if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Type *Ty = AI->getAllocatedType();
        unsigned Align =
            std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
                     AI->getAlignment());

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Align <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);

          TySize *= CUI->getZExtValue(); // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
          } else {
            FrameIndex =
                MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
          }

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          if (Align <= StackAlign)
            Align = 0;
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
        }
      }

      // Look for inline asm that clobbers the SP register.
      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        ImmutableCallSite CS(&I);
        if (isa<InlineAsm>(CS.getCalledValue())) {
          unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
      }

      // Look for calls to the @llvm.va_start intrinsic. We can omit some
      // prologue boilerplate for variadic functions that don't examine their
      // arguments.
      if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::vastart)
          MF->getFrameInfo().setHasVAStart(true);
      }

      // If we have a musttail call in a variadic function, we need to ensure
      // we forward implicit register parameters.
      if (const auto *CI = dyn_cast<CallInst>(&I)) {
        if (CI->isMustTailCall() && Fn->isVarArg())
          MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value.
      PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
    // are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      const Instruction *PadInst = BB.getFirstNonPHI();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
      // setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(&*BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst))
        assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[&BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      unsigned PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
      const auto *BB = UME.Handler.get<const BasicBlock *>();
      UME.Handler = MBBMap[BB];
    }
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
      const auto *BB = CME.Handler.get<const BasicBlock *>();
      CME.Handler = MBBMap[BB];
    }
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    // Map all BB references in the Wasm EH data to MBBs.
    DenseMap<BBOrMBB, BBOrMBB> NewMap;
    for (auto &KV : EHInfo.EHPadUnwindMap) {
      const auto *Src = KV.first.get<const BasicBlock *>();
      const auto *Dst = KV.second.get<const BasicBlock *>();
      NewMap[MBBMap[Src]] = MBBMap[Dst];
    }
    EHInfo.EHPadUnwindMap = std::move(NewMap);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointSpillMaps.clear();
  PreferredExtendType.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
unsigned FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(
      MF->getSubtarget().getTargetLowering()->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
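///
/// For example (illustrative, not from the original source): on a 32-bit
/// target where i64 is expanded into two i32 parts, CreateRegs(i64) allocates
/// two consecutive virtual registers and returns the first one; for a struct
/// type such as { i64, float } it visits each member and allocates three
/// registers in total.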
unsigned FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = CreateReg(RegisterVT, isDivergent);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

unsigned FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), DA && !TLI->requiresUniformRegister(*MF, V) &&
                                      DA->isDivergent(V));
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is any-extended
/// to the larger bit width (the additional high bits become unknown). The bit
/// width must be no smaller than the LiveOutInfo's existing bit width.
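///
/// For example (illustrative, not from the original source): if the info for a
/// register was recorded at 8 bits with Known.Zero = 0xF0 and Known.One = 0x05
/// and a caller asks for 16 bits, the known bits are any-extended: Known.Zero
/// becomes 0x00F0, Known.One becomes 0x0005, and NumSignBits is conservatively
/// reset to 1 because nothing is known about the new high bits.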
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->Known.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.zext(BitWidth, false /* => any extend */);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
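///
/// For example (illustrative, not from the original source): for
///   %p = phi i8 [ 5, %a ], [ 7, %b ]
/// the incoming constants are 0b00000101 and 0b00000111, so the merged info is
///   Known.One  = 5 & 7   = 0b00000101
///   Known.Zero = ~5 & ~7 = 0b11111000
///   NumSignBits = min(5, 5) = 5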
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  unsigned DestReg = ValueMap[PN];
  if (!Register::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    DestLOI.Known = KnownBits(BitWidth);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.Known.Zero = ~Val;
    DestLOI.Known.One = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!Register::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
         DestLOI.Known.One.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.Known.Zero &= ~Val;
      DestLOI.Known.One &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!Register::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.Known.Zero &= SrcLOI->Known.Zero;
    DestLOI.Known.One &= SrcLOI->Known.One;
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}

unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  unsigned &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

const Value *
FunctionLoweringInfo::getValueFromVirtualReg(unsigned Vreg) {
  if (VirtReg2Value.empty()) {
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getParent()->getDataLayout(),
                      P.first->getType(), ValueVTs);
      unsigned Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}