//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Match only if the original alignment of the argument is Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

/// CCIfILP32 - Match only if pointers are 32 bits wide (ILP32 / arm64_32).
class CCIfILP32<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().getPointerSize() == 4", A>;
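
// These predicates simply wrap another CCAction; for example, CCIfBigEndian is
// composed with CCIfType and CCBitConvertToType below to force big-endian
// vectors into a single wide element so their lane order stays consistent.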

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on Windows, in some circumstances, the SRet is passed in X0 or X1
  // instead. The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.

  CCIfInReg<CCIfType<[i64],
            CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1], [W0, W1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,
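
  // Illustrative example (not normative): for a Windows C++ instance method
  // that returns a large struct indirectly, 'this' is expected in X0 and the
  // sret pointer in X1; for a free function the sret pointer lands in X0. On
  // every other target the sret pointer simply goes in X8.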

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // valid byval argument is 8 bytes.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported.
  CCIfNest<CCAssignToReg<[X18]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
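
  // Scalable (SVE) arguments: data vectors are assigned to Z0-Z7 and predicate
  // vectors to P0-P3; once those registers are exhausted, the rules below fall
  // through to CCPassIndirect and the value is passed by reference instead.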
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split into two i64s; we can't fit half of it in register X7.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
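
  // Illustrative example (not normative): for `void f(long a, __int128 b)`,
  // `a` should land in X0 and `b` in the even-aligned pair X2/X3, skipping X1;
  // if no even-numbered pair is free, the i128 instead goes to a 16-byte
  // aligned stack slot.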

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
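
// Rough worked example for the convention above (illustrative only): for a
// call to `void f(int a, double b, int c)`, `a` goes in W0, `b` in D0 and `c`
// in W1, since the eight GPR and eight FPR argument registers are allocated
// independently. A ninth integer argument would spill to an 8-byte stack slot.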

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,

  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCAssignToReg<[P0, P1, P2, P3]>>
]>;

// Vararg functions on Windows pass floats in integer registers.
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, f32], CCPromoteToType<f64>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;
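
// Illustrative example (not normative): in a variadic call such as
// `printf("%f\n", 1.0)` on Windows, the double is bit-cast to i64 and passed
// in X1 (the format string pointer occupies X0), since the Windows variadic
// convention does not use the FP/SIMD registers for variadic arguments.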

// Windows Control Flow Guard checks take a single argument (the target function
// address) and have no return value.
let Entry = 1 in
def CC_AArch64_Win64_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X15]>>
]>;

// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//     + i128s (i.e. split i64s) don't need even registers.
//     + Stack slots are sized as needed rather than being at least 64-bit.
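//
// Illustrative example (not normative): a `char` argument that no longer fits
// in a register takes a 1-byte, 1-aligned stack slot under this convention,
// whereas generic AAPCS would round it up to a full 8-byte slot.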
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // valid byval argument is 8 bytes.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split into two i64s; we can't fit half of it in register X7.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Re-demote pointers to 32-bits so we don't end up storing 64-bit
  // values and clobbering neighbouring stack locations. Not very pretty.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfPtr<CCIfILP32<CCAssignToStack<4, 4>>>,

  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// In the ILP32 world, the minimum stack slot size is 4 bytes. Otherwise the
// same as the normal Darwin VarArgs handling.
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i32 or f32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,
  CCIfType<[f16], CCPromoteToType<f32>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// The WebKit_JS calling convention only passes the first argument (the callee)
// in a register; the remaining arguments go on the stack. We allow 32-bit stack
// slots so that WebKit can write partial values in the stack and define the
// other 32-bit quantity as undef.
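//
// Illustrative example (not normative): for a call with an i64 callee followed
// by two i32 arguments, the callee pointer is assigned to X0 and the two i32s
// take consecutive 4-byte slots in the outgoing argument area.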
let Entry = 1 in
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

let Entry = 1 in
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//     https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".
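//
// Reading the CCAssignToReg list below in order, that mapping is roughly:
// Base -> X19, Sp -> X20, Hp -> X21, R1 -> X22, R2 -> X23, R3 -> X24,
// R4 -> X25, R5 -> X26, R6 -> X27 and SpLim -> X28.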

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// The order of the callee-saves in this file is important, because the
// FrameLowering code will use this order to determine the layout of the
// callee-save area in the stack frame. As can be observed below, Darwin
// requires the frame-record (LR, FP) to be at the top of the callee-save area,
// whereas for other platforms they are at the bottom.
//
// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                             X25, X26, X27, X28, LR, FP,
                                             D8,  D9,  D10, D11,
                                             D12, D13, D14, D15)>;

// Darwin puts the frame-record at the top of the callee-save area.
def CSR_Darwin_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                                    X23, X24, X25, X26, X27, X28,
                                                    D8,  D9,  D10, D11,
                                                    D12, D13, D14, D15)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, FP, LR,
                                                 D8,  D9,  D10, D11,
                                                 D12, D13, D14, D15)>;

// The Control Flow Guard check call uses a custom calling convention that also
// preserves X0-X8 and Q0-Q7.
def CSR_Win_AArch64_CFGuard_Check : CalleeSavedRegs<(add CSR_Win_AArch64_AAPCS,
                                                         (sequence "X%u", 0, 8),
                                                         (sequence "Q%u", 0, 7))>;

// AArch64 PCS for vector functions (VPCS)
// must (additionally) preserve full Q8-Q23 registers.
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                              X25, X26, X27, X28, LR, FP,
                                              (sequence "Q%u", 8, 23))>;

// Functions taking SVE arguments or returning an SVE type
// must (additionally) preserve full Z8-Z23 and predicate registers P4-P15.
def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                 (sequence "P%u", 4, 15),
                                                 X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, LR, FP)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X21)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but all other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers; the registers in
// the pair must also belong to the same class. Since the access function on the
// fast path calls a function that follows CSR_AArch64_TLS_Darwin,
// CSR_AArch64_CXX_TLS_Darwin should be a subset of CSR_AArch64_TLS_Darwin.
def CSR_AArch64_CXX_TLS_Darwin
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X15, X16, X17, X18),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by prologue, epilogue.
def CSR_AArch64_CXX_TLS_Darwin_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_AArch64_CXX_TLS_Darwin_ViaCopy
    : CalleeSavedRegs<(sub CSR_AArch64_CXX_TLS_Darwin, LR, FP)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                   (sequence "X%u", 9, 15))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Variants of the standard calling conventions for shadow call stack.
// These all preserve x18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_CXX_TLS_Darwin_SCS
    : CalleeSavedRegs<(add CSR_AArch64_CXX_TLS_Darwin, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_SVE_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_SVE_AAPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;