//===---- AMDCallingConv.td - Calling Conventions for Radeon GPUs ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AMD Radeon GPUs.
//
//===----------------------------------------------------------------------===//

// Inversion of CCIfInReg
class CCIfNotInReg<CCAction A> : CCIf<"!ArgFlags.isInReg()", A> {}
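// Matches arguments whose ABI flags request sign or zero extension.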
class CCIfExtend<CCAction A>
  : CCIf<"ArgFlags.isSExt() || ArgFlags.isZExt()", A>;

// Calling convention for SI
def CC_SI : CallingConv<[

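  // Arguments marked "inreg" are treated as uniform values and assigned to
  // SGPRs; all other arguments are assigned to VGPRs below.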
  CCIfInReg<CCIfType<[f32, i32, f16, v2i16, v2f16] , CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39,
    SGPR40, SGPR41, SGPR42, SGPR43, SGPR44, SGPR45, SGPR46, SGPR47,
    SGPR48, SGPR49, SGPR50, SGPR51, SGPR52, SGPR53, SGPR54, SGPR55,
    SGPR56, SGPR57, SGPR58, SGPR59, SGPR60, SGPR61, SGPR62, SGPR63,
    SGPR64, SGPR65, SGPR66, SGPR67, SGPR68, SGPR69, SGPR70, SGPR71,
    SGPR72, SGPR73, SGPR74, SGPR75, SGPR76, SGPR77, SGPR78, SGPR79,
    SGPR80, SGPR81, SGPR82, SGPR83, SGPR84, SGPR85, SGPR86, SGPR87,
    SGPR88, SGPR89, SGPR90, SGPR91, SGPR92, SGPR93, SGPR94, SGPR95,
    SGPR96, SGPR97, SGPR98, SGPR99, SGPR100, SGPR101, SGPR102, SGPR103,
    SGPR104, SGPR105
  ]>>>,

  // 32*4 + 4 is the minimum for a fetch shader consumer with 32 inputs.
  CCIfNotInReg<CCIfType<[f32, i32, f16, v2i16, v2f16] , CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>>
]>;
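
// Return value convention for shader entry points: i32 values are returned in
// SGPRs and floating-point values (f32, f16, v2f16) in VGPRs.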
def RetCC_SI_Shader : CallingConv<[
  CCIfType<[i32] , CCAssignToReg<[
    SGPR0, SGPR1, SGPR2, SGPR3, SGPR4, SGPR5, SGPR6, SGPR7,
    SGPR8, SGPR9, SGPR10, SGPR11, SGPR12, SGPR13, SGPR14, SGPR15,
    SGPR16, SGPR17, SGPR18, SGPR19, SGPR20, SGPR21, SGPR22, SGPR23,
    SGPR24, SGPR25, SGPR26, SGPR27, SGPR28, SGPR29, SGPR30, SGPR31,
    SGPR32, SGPR33, SGPR34, SGPR35, SGPR36, SGPR37, SGPR38, SGPR39,
    SGPR40, SGPR41, SGPR42, SGPR43, SGPR44, SGPR45, SGPR46, SGPR47,
    SGPR48, SGPR49, SGPR50, SGPR51, SGPR52, SGPR53, SGPR54, SGPR55,
    SGPR56, SGPR57, SGPR58, SGPR59, SGPR60, SGPR61, SGPR62, SGPR63,
    SGPR64, SGPR65, SGPR66, SGPR67, SGPR68, SGPR69, SGPR70, SGPR71,
    SGPR72, SGPR73, SGPR74, SGPR75, SGPR76, SGPR77, SGPR78, SGPR79,
    SGPR80, SGPR81, SGPR82, SGPR83, SGPR84, SGPR85, SGPR86, SGPR87,
    SGPR88, SGPR89, SGPR90, SGPR91, SGPR92, SGPR93, SGPR94, SGPR95,
    SGPR96, SGPR97, SGPR98, SGPR99, SGPR100, SGPR101, SGPR102, SGPR103,
    SGPR104, SGPR105
  ]>>,

  // 32*4 + 4 is the minimum for a fetch shader with 32 outputs.
  CCIfType<[f32, f16, v2f16] , CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31,
    VGPR32, VGPR33, VGPR34, VGPR35, VGPR36, VGPR37, VGPR38, VGPR39,
    VGPR40, VGPR41, VGPR42, VGPR43, VGPR44, VGPR45, VGPR46, VGPR47,
    VGPR48, VGPR49, VGPR50, VGPR51, VGPR52, VGPR53, VGPR54, VGPR55,
    VGPR56, VGPR57, VGPR58, VGPR59, VGPR60, VGPR61, VGPR62, VGPR63,
    VGPR64, VGPR65, VGPR66, VGPR67, VGPR68, VGPR69, VGPR70, VGPR71,
    VGPR72, VGPR73, VGPR74, VGPR75, VGPR76, VGPR77, VGPR78, VGPR79,
    VGPR80, VGPR81, VGPR82, VGPR83, VGPR84, VGPR85, VGPR86, VGPR87,
    VGPR88, VGPR89, VGPR90, VGPR91, VGPR92, VGPR93, VGPR94, VGPR95,
    VGPR96, VGPR97, VGPR98, VGPR99, VGPR100, VGPR101, VGPR102, VGPR103,
    VGPR104, VGPR105, VGPR106, VGPR107, VGPR108, VGPR109, VGPR110, VGPR111,
    VGPR112, VGPR113, VGPR114, VGPR115, VGPR116, VGPR117, VGPR118, VGPR119,
    VGPR120, VGPR121, VGPR122, VGPR123, VGPR124, VGPR125, VGPR126, VGPR127,
    VGPR128, VGPR129, VGPR130, VGPR131, VGPR132, VGPR133, VGPR134, VGPR135
  ]>>
]>;
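
// Callee-saved register sets; some are defined only to obtain register masks.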
def CSR_AMDGPU_VGPRs_24_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 24, 255)
>;

def CSR_AMDGPU_VGPRs_32_255 : CalleeSavedRegs<
  (sequence "VGPR%u", 32, 255)
>;

def CSR_AMDGPU_SGPRs_32_105 : CalleeSavedRegs<
  (sequence "SGPR%u", 32, 105)
>;

// Just to get the regmask, not for calling convention purposes.
def CSR_AMDGPU_AllVGPRs : CalleeSavedRegs<
  (sequence "VGPR%u", 0, 255)
>;

// Just to get the regmask, not for calling convention purposes.
def CSR_AMDGPU_AllAllocatableSRegs : CalleeSavedRegs<
  (add (sequence "SGPR%u", 0, 105), VCC_LO, VCC_HI)
>;
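
// Combined callee-saved set: the high VGPR and SGPR ranges. VGPR0-31 and
// SGPR0-31 are left caller-saved.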
def CSR_AMDGPU_HighRegs : CalleeSavedRegs<
  (add CSR_AMDGPU_VGPRs_32_255, CSR_AMDGPU_SGPRs_32_105)
>;

// Calling convention for leaf functions
def CC_AMDGPU_Func : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,
  CCIfType<[i1], CCPromoteToType<i32>>,
  CCIfType<[i1, i8, i16], CCIfExtend<CCPromoteToType<i32>>>,
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16, i1], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>,
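  // Arguments not assigned to a VGPR above, and all wider vector types, are
  // passed on the stack with 4-byte alignment.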
  CCIfType<[i32, f32, v2i16, v2f16, i16, f16, i1], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v2i32, v2f32], CCAssignToStack<8, 4>>,
  CCIfType<[v3i32, v3f32], CCAssignToStack<12, 4>>,
  CCIfType<[v4i32, v4f32, v2i64, v2f64], CCAssignToStack<16, 4>>,
  CCIfType<[v5i32, v5f32], CCAssignToStack<20, 4>>,
  CCIfType<[v8i32, v8f32], CCAssignToStack<32, 4>>,
  CCIfType<[v16i32, v16f32], CCAssignToStack<64, 4>>
]>;

// Return value calling convention for leaf functions
def RetCC_AMDGPU_Func : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,
  CCIfType<[i1, i16], CCIfExtend<CCPromoteToType<i32>>>,
  CCIfType<[i32, f32, i16, f16, v2i16, v2f16], CCAssignToReg<[
    VGPR0, VGPR1, VGPR2, VGPR3, VGPR4, VGPR5, VGPR6, VGPR7,
    VGPR8, VGPR9, VGPR10, VGPR11, VGPR12, VGPR13, VGPR14, VGPR15,
    VGPR16, VGPR17, VGPR18, VGPR19, VGPR20, VGPR21, VGPR22, VGPR23,
    VGPR24, VGPR25, VGPR26, VGPR27, VGPR28, VGPR29, VGPR30, VGPR31]>>
]>;
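
// Select the argument convention from the subtarget generation and the IR
// calling convention.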
def CC_AMDGPU : CallingConv<[
  CCIf<"static_cast<const GCNSubtarget&>"
       "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
       "AMDGPUSubtarget::SOUTHERN_ISLANDS",
       CCDelegateTo<CC_SI>>,
  CCIf<"static_cast<const GCNSubtarget&>"
       "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
       "AMDGPUSubtarget::SOUTHERN_ISLANDS && State.getCallingConv() == CallingConv::C",
       CCDelegateTo<CC_AMDGPU_Func>>
]>;