//=== ARMCallingConv.cpp - ARM Custom CC Routines ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the custom routines for the ARM Calling Convention that
// aren't done by tablegen, and includes the table generated implementations.
//
//===----------------------------------------------------------------------===//

15 #include "ARMCallingConv.h"
16 #include "ARMSubtarget.h"
17 #include "ARMRegisterInfo.h"
// APCS f64 is in register pairs, possibly split to stack
static bool f64AssignAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          CCState &State, bool CanFail) {
  static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else {
    // For the 2nd half of a v2f64, do not fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}

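// Illustrative example for f64AssignAPCS above (hypothetical call, not taken
// from this file): for "void f(int a, int b, int c, double d)" under APCS,
// a/b/c occupy r0-r2, the first half of d takes r3, and the second half falls
// through to a 4-byte stack slot via the second AllocateReg failure path.
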
static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags,
                                   CCState &State) {
  if (!f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true; // we handled it
}

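// Note on CC_ARM_APCS_Custom_f64 above: the first f64AssignAPCS call may fail
// (CanFail == true), in which case returning false lets the tablegen-generated
// convention fall through to its remaining rules. For a v2f64 the second half
// is assigned with CanFail == false so that, once the first half has a
// location, the second half always gets one too.
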
// AAPCS f64 is in aligned register pairs
static bool f64AssignAAPCS(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                           CCValAssign::LocInfo &LocInfo,
                           CCState &State, bool CanFail) {
  static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
  static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };
  static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 };
  static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, ShadowRegList);
  if (Reg == 0) {

    // If only R3 was left unallocated, we still must waste it now.
    Reg = State.AllocateReg(GPRArgRegs);
    assert((!Reg || Reg == ARM::R3) && "Wrong GPRs usage for f64");

    // For the 2nd half of a v2f64, do not just fail.
    if (CanFail)
      return false;

    // Put the whole thing on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 8),
                                           LocVT, LocInfo));
    return true;
  }

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  unsigned T = State.AllocateReg(LoRegList[i]);
  (void)T;
  assert(T == LoRegList[i] && "Could not allocate register");

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

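// Illustrative example for f64AssignAAPCS above (hypothetical call): for
// "void f(int a, double d)" under the base AAPCS, a takes r0 and d needs an
// aligned pair, so it gets {r2, r3}; allocating r2 also shadows r1, which is
// wasted exactly as the ABI requires.
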
static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                    CCValAssign::LocInfo &LocInfo,
                                    ISD::ArgFlagsTy &ArgFlags,
                                    CCState &State) {
  if (!f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, true))
    return false;
  if (LocVT == MVT::v2f64 &&
      !f64AssignAAPCS(ValNo, ValVT, LocVT, LocInfo, State, false))
    return false;
  return true; // we handled it
}

static bool f64RetAssign(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                         CCValAssign::LocInfo &LocInfo, CCState &State) {
  static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 };
  static const MCPhysReg LoRegList[] = { ARM::R1, ARM::R3 };

  unsigned Reg = State.AllocateReg(HiRegList, LoRegList);
  if (Reg == 0)
    return false; // we didn't handle it

  unsigned i;
  for (i = 0; i < 2; ++i)
    if (HiRegList[i] == Reg)
      break;

  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
                                         LocVT, LocInfo));
  return true;
}

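// Note on f64RetAssign above: a single returned f64 is split across {r0, r1};
// when a v2f64 is returned, the routine runs twice and the second element
// lands in {r2, r3}.
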
static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                      CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags,
                                      CCState &State) {
  if (!f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  if (LocVT == MVT::v2f64 && !f64RetAssign(ValNo, ValVT, LocVT, LocInfo, State))
    return false;
  return true; // we handled it
}

static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                       CCValAssign::LocInfo &LocInfo,
                                       ISD::ArgFlagsTy &ArgFlags,
                                       CCState &State) {
  return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
                                   State);
}

static const MCPhysReg RRegList[] = { ARM::R0,  ARM::R1,  ARM::R2,  ARM::R3 };

static const MCPhysReg SRegList[] = { ARM::S0,  ARM::S1,  ARM::S2,  ARM::S3,
                                      ARM::S4,  ARM::S5,  ARM::S6,  ARM::S7,
                                      ARM::S8,  ARM::S9,  ARM::S10, ARM::S11,
                                      ARM::S12, ARM::S13, ARM::S14, ARM::S15 };
static const MCPhysReg DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                      ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
static const MCPhysReg QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };

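// These are the AAPCS argument registers used for the block (HFA/HVA)
// allocation below: r0-r3 for integer members, and the VFP argument registers
// s0-s15 / d0-d7 / q0-q3 for floating-point and vector members.
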
// Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
// has InConsecutiveRegs set, and that the last member also has
// InConsecutiveRegsLast set. We must process all members of the HA before
// we can allocate it, as we need to know the total number of registers that
// will be needed in order to (attempt to) allocate a contiguous block.
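// For example, a homogeneous aggregate of four floats arrives here as four
// pending f32 members; only when the member flagged InConsecutiveRegsLast is
// seen do we try to grab a contiguous block such as s0-s3, and otherwise the
// members fall through to the stack handling at the end of the function.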
static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
                                          MVT &LocVT,
                                          CCValAssign::LocInfo &LocInfo,
                                          ISD::ArgFlagsTy &ArgFlags,
                                          CCState &State) {
  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();

  // AAPCS HFAs must have 1-4 elements, all of the same type
  if (PendingMembers.size() > 0)
    assert(PendingMembers[0].getLocVT() == LocVT);

  // Add the argument to the list to be allocated once we know the size of the
  // aggregate. Store the type's required alignment as extra info for later: in
  // the [N x i64] case all trace has been removed by the time we actually get
  // to do the allocation.
  PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo,
                                                   ArgFlags.getOrigAlign()));

  if (!ArgFlags.isInConsecutiveRegsLast())
    return true;

  // Try to allocate a contiguous block of registers, each of the correct
  // size to hold one member.
  auto &DL = State.getMachineFunction().getDataLayout();
  unsigned StackAlign = DL.getStackAlignment().value();
  unsigned Align = std::min(PendingMembers[0].getExtraInfo(), StackAlign);

  ArrayRef<MCPhysReg> RegList;
  switch (LocVT.SimpleTy) {
  case MVT::i32: {
    RegList = RRegList;
    unsigned RegIdx = State.getFirstUnallocated(RegList);

    // First consume all registers that would give an unaligned object. Whether
    // we go on stack or in regs, no-one will be using them in future.
    unsigned RegAlign = alignTo(Align, 4) / 4;
    while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
      State.AllocateReg(RegList[RegIdx++]);

    break;
  }
  case MVT::f16:
  case MVT::f32:
    RegList = SRegList;
    break;
  case MVT::v4f16:
  case MVT::f64:
    RegList = DRegList;
    break;
  case MVT::v8f16:
  case MVT::v2f64:
    RegList = QRegList;
    break;
  default:
    llvm_unreachable("Unexpected member type for block aggregate");
    break;
  }

  unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
  if (RegResult) {
    for (SmallVectorImpl<CCValAssign>::iterator It = PendingMembers.begin();
         It != PendingMembers.end(); ++It) {
      It->convertToReg(RegResult);
      State.addLoc(*It);
      ++RegResult;
    }
    PendingMembers.clear();
    return true;
  }

  // Register allocation failed, we'll be needing the stack
  unsigned Size = LocVT.getSizeInBits() / 8;
  if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) {
    // If nothing else has used the stack until this point, a non-HFA aggregate
    // can be split between regs and stack.
    unsigned RegIdx = State.getFirstUnallocated(RegList);
    for (auto &It : PendingMembers) {
      if (RegIdx >= RegList.size())
        It.convertToMem(State.AllocateStack(Size, Size));
      else
        It.convertToReg(State.AllocateReg(RegList[RegIdx++]));

      State.addLoc(It);
    }
    PendingMembers.clear();
    return true;
  } else if (LocVT != MVT::i32)
    RegList = SRegList;

  // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core)
  for (auto Reg : RegList)
    State.AllocateReg(Reg);

  // After the first item has been allocated, the rest are packed as tightly as
  // possible. (E.g. an incoming i64 would have starting Align of 8, but we'll
  // be allocating a bunch of i32 slots).
  unsigned RestAlign = std::min(Align, Size);

  for (auto &It : PendingMembers) {
    It.convertToMem(State.AllocateStack(Size, Align));
    State.addLoc(It);
    Align = RestAlign;
  }

  // All pending members have now been allocated
  PendingMembers.clear();

  // This will be allocated by the last member of the aggregate
  return true;
}

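// Illustrative example for the aggregate handler above (hypothetical
// arguments): a non-HFA composite lowered to four i32 pieces, arriving when
// only r2 and r3 are free, cannot get a four-register block; if nothing has
// touched the stack yet, the split path assigns the first two pieces to r2/r3
// and the remaining pieces to 4-byte stack slots.
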
// Include the table generated calling convention implementations.
#include "ARMGenCallingConv.inc"