//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;
class CCIfNotSubtarget<string F, CCAction A>
 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;
class CCIfOrigArgWasNotPPCF128<CCAction A>
  : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
         A>;
class CCIfOrigArgWasPPCF128<CCAction A>
  : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
         A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC coldcc.
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,

  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2]>>>,

  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1]>>>,

  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;

// Return-value convention for PowerPC
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 are passed in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // QPX vectors are returned in QF1 and QF2.
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Note that we don't currently have calling conventions for 64-bit
// PowerPC, but handle all the complexities of the ABI in the lowering
// logic. FIXME: See if the logic can be simplified with use of CCs.
// This may require some extensions to current table generation.

// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1],  CCPromoteToType<i64>>,
  CCIfType<[i8],  CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;

// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  CCIfType<[i1],   CCPromoteToType<i64>>,
  CCIfType<[i8],   CCPromoteToType<i64>>,
  CCIfType<[i16],  CCPromoteToType<i64>>,
  CCIfType<[i32],  CCPromoteToType<i64>>,
  CCIfType<[i64],  CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32],  CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64],  CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
            CCIfOrigArgWasPPCF128<CCCustom<
            "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // Floats are stored in double precision format, thus they have the same
  // alignment and size as doubles.
  // With SPE floats are stored as single precision, so have the alignment and
  // size of int.
  CCIfType<[f32,f64], CCIfNotSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,
  CCIfType<[f32], CCIfSubtarget<"hasSPE()", CCAssignToStack<4, 4>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,

  // QPX vectors that are stored in double precision need 32-byte alignment.
  CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToStack<16, 16>>>
]>;

// This calling convention always puts vector arguments on the stack. It is
// used to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
def CC_PPC32_SVR4 : CallingConv<[
  // QPX vectors mirror the scalar FP convention.
  CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
           CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,

  // The first 12 vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12, V13]>>>,

  // Float128 types are treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                         V8, V9, V10, V11, V12, V13]>>>,

  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the caller's stack frame.
//
// Still, the address of the aggregate copy in the caller's stack frame is
// passed in a GPR (or in the parameter list area if all GPRs are allocated)
// from the caller to the callee. The location for the address argument is
// assigned by the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which
// are not passed by value.

def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;
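
// The AltiVec registers V20-V31 are callee-saved.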
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                        R21, R22, R23, R24, R25, R26, R27, R28,
                                        R29, R30, R31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                        )>;

def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;

// SPE does not use FPRs, so break out the common register set as base.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                           )>;
def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                      )>;
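
// SPE callee-saved registers: S14-S31 (used in place of the FPRs, which SPE
// lacks).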
def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30, S31
                                   )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;
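
// The 32-bit AIX ABI preserves the same set as Darwin32: R13-R31, F14-F31 and
// CR2-CR4.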
def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                     )>;

def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
                                        X21, X22, X23, X24, X25, X26, X27, X28,
                                        X29, X30, X31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                        )>;

def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;

def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                      X21, X22, X23, X24, X25, X26, X27, X28,
                                      X29, X30, X31, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31, CR2, CR3, CR4
                                      )>;

def CSR_AIX64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                     )>;

// CSRs that are handled by prologue, epilogue.
def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;

def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;

def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;

def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;

def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;

def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;

def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;

def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;
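
// An empty set: no registers are treated as callee-saved.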
def CSR_NoRegs : CalleeSavedRegs<(add)>;

// The coldcc calling convention marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0, r11, r13 as they are optional in function linkage
// and their values may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;

def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

def CSR_SVR64_ColdCC_R2 : CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;
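
// Register sets in which (nearly) all registers are treated as callee-saved;
// the Altivec and VSX variants below extend the base GPR/FPR/CR set.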
def CSR_64_AllRegs : CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                          (sequence "X%u", 14, 31),
                                          (sequence "F%u", 0, 31),
                                          (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;