Revert r354244 "[DAGCombiner] Eliminate dead stores to stack."
[llvm-complete.git] / lib / Target / X86 / X86CallingConv.cpp
blob7a58e9ae0692a93cc882374c4ab0cec4f2d1e98d
1 //=== X86CallingConv.cpp - X86 Custom Calling Convention Impl -*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the implementation of custom routines for the X86
10 // Calling Convention that aren't done by tablegen.
12 //===----------------------------------------------------------------------===//
14 #include "X86CallingConv.h"
15 #include "X86Subtarget.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/CodeGen/CallingConvLower.h"
18 #include "llvm/IR/CallingConv.h"
20 using namespace llvm;
22 /// When regcall calling convention compiled to 32 bit arch, special treatment
23 /// is required for 64 bit masks.
24 /// The value should be assigned to two GPRs.
25 /// \return true if registers were allocated and false otherwise.
26 static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT,
27 MVT &LocVT,
28 CCValAssign::LocInfo &LocInfo,
29 ISD::ArgFlagsTy &ArgFlags,
30 CCState &State) {
31 // List of GPR registers that are available to store values in regcall
32 // calling convention.
33 static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
34 X86::ESI};
36 // The vector will save all the available registers for allocation.
37 SmallVector<unsigned, 5> AvailableRegs;
39 // searching for the available registers.
40 for (auto Reg : RegList) {
41 if (!State.isAllocated(Reg))
42 AvailableRegs.push_back(Reg);
45 const size_t RequiredGprsUponSplit = 2;
46 if (AvailableRegs.size() < RequiredGprsUponSplit)
47 return false; // Not enough free registers - continue the search.
49 // Allocating the available registers.
50 for (unsigned I = 0; I < RequiredGprsUponSplit; I++) {
52 // Marking the register as located.
53 unsigned Reg = State.AllocateReg(AvailableRegs[I]);
55 // Since we previously made sure that 2 registers are available
56 // we expect that a real register number will be returned.
57 assert(Reg && "Expecting a register will be available");
59 // Assign the value to the allocated register
60 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
63 // Successful in allocating regsiters - stop scanning next rules.
64 return true;
67 static ArrayRef<MCPhysReg> CC_X86_VectorCallGetSSEs(const MVT &ValVT) {
68 if (ValVT.is512BitVector()) {
69 static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
70 X86::ZMM3, X86::ZMM4, X86::ZMM5};
71 return makeArrayRef(std::begin(RegListZMM), std::end(RegListZMM));
74 if (ValVT.is256BitVector()) {
75 static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
76 X86::YMM3, X86::YMM4, X86::YMM5};
77 return makeArrayRef(std::begin(RegListYMM), std::end(RegListYMM));
80 static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
81 X86::XMM3, X86::XMM4, X86::XMM5};
82 return makeArrayRef(std::begin(RegListXMM), std::end(RegListXMM));
85 static ArrayRef<MCPhysReg> CC_X86_64_VectorCallGetGPRs() {
86 static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
87 return makeArrayRef(std::begin(RegListGPR), std::end(RegListGPR));
90 static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
91 MVT &LocVT,
92 CCValAssign::LocInfo &LocInfo,
93 ISD::ArgFlagsTy &ArgFlags,
94 CCState &State) {
96 ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
97 bool Is64bit = static_cast<const X86Subtarget &>(
98 State.getMachineFunction().getSubtarget())
99 .is64Bit();
101 for (auto Reg : RegList) {
102 // If the register is not marked as allocated - assign to it.
103 if (!State.isAllocated(Reg)) {
104 unsigned AssigedReg = State.AllocateReg(Reg);
105 assert(AssigedReg == Reg && "Expecting a valid register allocation");
106 State.addLoc(
107 CCValAssign::getReg(ValNo, ValVT, AssigedReg, LocVT, LocInfo));
108 return true;
110 // If the register is marked as shadow allocated - assign to it.
111 if (Is64bit && State.IsShadowAllocatedReg(Reg)) {
112 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
113 return true;
117 llvm_unreachable("Clang should ensure that hva marked vectors will have "
118 "an available register.");
119 return false;
122 /// Vectorcall calling convention has special handling for vector types or
123 /// HVA for 64 bit arch.
124 /// For HVAs shadow registers might be allocated on the first pass
125 /// and actual XMM registers are allocated on the second pass.
126 /// For vector types, actual XMM registers are allocated on the first pass.
127 /// \return true if registers were allocated and false otherwise.
128 static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
129 CCValAssign::LocInfo &LocInfo,
130 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
131 // On the second pass, go through the HVAs only.
132 if (ArgFlags.isSecArgPass()) {
133 if (ArgFlags.isHva())
134 return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
135 ArgFlags, State);
136 return true;
139 // Process only vector types as defined by vectorcall spec:
140 // "A vector type is either a floating-point type, for example,
141 // a float or double, or an SIMD vector type, for example, __m128 or __m256".
142 if (!(ValVT.isFloatingPoint() ||
143 (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
144 // If R9 was already assigned it means that we are after the fourth element
145 // and because this is not an HVA / Vector type, we need to allocate
146 // shadow XMM register.
147 if (State.isAllocated(X86::R9)) {
148 // Assign shadow XMM register.
149 (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
152 return false;
155 if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
156 // Assign shadow GPR register.
157 (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());
159 // Assign XMM register - (shadow for HVA and non-shadow for non HVA).
160 if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
161 // In Vectorcall Calling convention, additional shadow stack can be
162 // created on top of the basic 32 bytes of win64.
163 // It can happen if the fifth or sixth argument is vector type or HVA.
164 // At that case for each argument a shadow stack of 8 bytes is allocated.
165 if (Reg == X86::XMM4 || Reg == X86::XMM5)
166 State.AllocateStack(8, 8);
168 if (!ArgFlags.isHva()) {
169 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
170 return true; // Allocated a register - Stop the search.
175 // If this is an HVA - Stop the search,
176 // otherwise continue the search.
177 return ArgFlags.isHva();
180 /// Vectorcall calling convention has special handling for vector types or
181 /// HVA for 32 bit arch.
182 /// For HVAs actual XMM registers are allocated on the second pass.
183 /// For vector types, actual XMM registers are allocated on the first pass.
184 /// \return true if registers were allocated and false otherwise.
185 static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
186 CCValAssign::LocInfo &LocInfo,
187 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
188 // On the second pass, go through the HVAs only.
189 if (ArgFlags.isSecArgPass()) {
190 if (ArgFlags.isHva())
191 return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
192 ArgFlags, State);
193 return true;
196 // Process only vector types as defined by vectorcall spec:
197 // "A vector type is either a floating point type, for example,
198 // a float or double, or an SIMD vector type, for example, __m128 or __m256".
199 if (!(ValVT.isFloatingPoint() ||
200 (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
201 return false;
204 if (ArgFlags.isHva())
205 return true; // If this is an HVA - Stop the search.
207 // Assign XMM register.
208 if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
209 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
210 return true;
213 // In case we did not find an available XMM register for a vector -
214 // pass it indirectly.
215 // It is similar to CCPassIndirect, with the addition of inreg.
216 if (!ValVT.isFloatingPoint()) {
217 LocVT = MVT::i32;
218 LocInfo = CCValAssign::Indirect;
219 ArgFlags.setInReg();
222 return false; // No register was assigned - Continue the search.
/// Custom routine that rejects any use of the AnyReg calling convention
/// outside of the stackmap / patchpoint intrinsics (asserts in Debug builds).
static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
                                CCState &) {
  llvm_unreachable("The AnyReg calling convention is only supported by the "
                   "stackmap and patchpoint intrinsics.");
  // gracefully fallback to X86 C calling convention on Release builds.
  return false;
}
234 static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
235 CCValAssign::LocInfo &LocInfo,
236 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
237 // This is similar to CCAssignToReg<[EAX, EDX, ECX]>, but makes sure
238 // not to split i64 and double between a register and stack
239 static const MCPhysReg RegList[] = {X86::EAX, X86::EDX, X86::ECX};
240 static const unsigned NumRegs = sizeof(RegList) / sizeof(RegList[0]);
242 SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
244 // If this is the first part of an double/i64/i128, or if we're already
245 // in the middle of a split, add to the pending list. If this is not
246 // the end of the split, return, otherwise go on to process the pending
247 // list
248 if (ArgFlags.isSplit() || !PendingMembers.empty()) {
249 PendingMembers.push_back(
250 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
251 if (!ArgFlags.isSplitEnd())
252 return true;
255 // If there are no pending members, we are not in the middle of a split,
256 // so do the usual inreg stuff.
257 if (PendingMembers.empty()) {
258 if (unsigned Reg = State.AllocateReg(RegList)) {
259 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
260 return true;
262 return false;
265 assert(ArgFlags.isSplitEnd());
267 // We now have the entire original argument in PendingMembers, so decide
268 // whether to use registers or the stack.
269 // Per the MCU ABI:
270 // a) To use registers, we need to have enough of them free to contain
271 // the entire argument.
272 // b) We never want to use more than 2 registers for a single argument.
274 unsigned FirstFree = State.getFirstUnallocated(RegList);
275 bool UseRegs = PendingMembers.size() <= std::min(2U, NumRegs - FirstFree);
277 for (auto &It : PendingMembers) {
278 if (UseRegs)
279 It.convertToReg(State.AllocateReg(RegList[FirstFree++]));
280 else
281 It.convertToMem(State.AllocateStack(4, 4));
282 State.addLoc(It);
285 PendingMembers.clear();
287 return true;
290 // Provides entry points of CC_X86 and RetCC_X86.
291 #include "X86GenCallingConv.inc"