//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-argument-reg-usage-info"

INITIALIZE_PASS(AMDGPUArgumentUsageInfo, DEBUG_TYPE,
                "Argument Register Usage Information Storage", false, true)
void ArgDescriptor::print(raw_ostream &OS,
                          const TargetRegisterInfo *TRI) const {
  if (!isSet()) {
    OS << "<not set>\n";
    return;
  }

  if (isRegister())
    OS << "Reg " << printReg(getRegister(), TRI);
  else
    OS << "Stack offset " << getStackOffset();

  if (isMasked()) {
    OS << " & ";
    llvm::write_hex(OS, Mask, llvm::HexPrintStyle::PrefixLower);
  }

  OS << '\n';
}

char AMDGPUArgumentUsageInfo::ID = 0;

const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::ExternFunctionInfo{};

// Hardcoded registers from fixed function ABI
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::FixedABIFunctionInfo
    = AMDGPUFunctionArgInfo::fixedABILayout();

bool AMDGPUArgumentUsageInfo::doInitialization(Module &M) {
  return false;
}

bool AMDGPUArgumentUsageInfo::doFinalization(Module &M) {
  ArgInfoMap.clear();
  return false;
}
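
// Dump the recorded argument descriptors for every function in ArgInfoMap.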
void AMDGPUArgumentUsageInfo::print(raw_ostream &OS, const Module *M) const {
  for (const auto &FI : ArgInfoMap) {
    OS << "Arguments for " << FI.first->getName() << '\n'
       << " PrivateSegmentBuffer: " << FI.second.PrivateSegmentBuffer
       << " DispatchPtr: " << FI.second.DispatchPtr
       << " QueuePtr: " << FI.second.QueuePtr
       << " KernargSegmentPtr: " << FI.second.KernargSegmentPtr
       << " DispatchID: " << FI.second.DispatchID
       << " FlatScratchInit: " << FI.second.FlatScratchInit
       << " PrivateSegmentSize: " << FI.second.PrivateSegmentSize
       << " WorkGroupIDX: " << FI.second.WorkGroupIDX
       << " WorkGroupIDY: " << FI.second.WorkGroupIDY
       << " WorkGroupIDZ: " << FI.second.WorkGroupIDZ
       << " WorkGroupInfo: " << FI.second.WorkGroupInfo
       << " PrivateSegmentWaveByteOffset: "
       << FI.second.PrivateSegmentWaveByteOffset
       << " ImplicitBufferPtr: " << FI.second.ImplicitBufferPtr
       << " ImplicitArgPtr: " << FI.second.ImplicitArgPtr
       << " WorkItemIDX " << FI.second.WorkItemIDX
       << " WorkItemIDY " << FI.second.WorkItemIDY
       << " WorkItemIDZ " << FI.second.WorkItemIDZ
       << '\n';
  }
}
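
// Return the argument descriptor for the given preloaded value, together with
// the register class and LLT it is expected to live in. The descriptor
// pointer is null when the corresponding input has not been set.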
std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
AMDGPUFunctionArgInfo::getPreloadedValue(
    AMDGPUFunctionArgInfo::PreloadedValue Value) const {
  switch (Value) {
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
    return std::make_tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer
                                                : nullptr,
                           &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
  }
  case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
    return std::make_tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
                           &AMDGPU::SGPR_64RegClass,
                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
    return std::make_tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
    return std::make_tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
    return std::make_tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
                           &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return std::make_tuple(
        PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
        &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
    return std::make_tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
                           &AMDGPU::SGPR_64RegClass,
                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
    return std::make_tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
                           &AMDGPU::SGPR_64RegClass,
                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::DISPATCH_ID:
    return std::make_tuple(DispatchID ? &DispatchID : nullptr,
                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
    return std::make_tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
                           &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::DISPATCH_PTR:
    return std::make_tuple(DispatchPtr ? &DispatchPtr : nullptr,
                           &AMDGPU::SGPR_64RegClass,
                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::QUEUE_PTR:
    return std::make_tuple(QueuePtr ? &QueuePtr : nullptr,
                           &AMDGPU::SGPR_64RegClass,
                           LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
    return std::make_tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
    return std::make_tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
    return std::make_tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
                           &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  }
  llvm_unreachable("unexpected preloaded value type");
}
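
// Fixed-ABI layout: implicit kernel inputs are assigned to a hardcoded set of
// SGPRs (s0-s14), and the packed workitem IDs are passed in v31.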
constexpr AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() {
  AMDGPUFunctionArgInfo AI;
  AI.PrivateSegmentBuffer
      = ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3);
  AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5);
  AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7);

  // Do not pass the kernarg segment pointer; only pass the incremented version
  // (ImplicitArgPtr) in its place.
  AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9);
  AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11);

  // Skip FlatScratchInit/PrivateSegmentSize
  AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12);
  AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13);
  AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14);
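
  // All three workitem IDs are packed into v31 as 10-bit fields at bit
  // offsets 0 (X), 10 (Y) and 20 (Z), selected by the mask on the descriptor.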
  const unsigned Mask = 0x3ff;
  AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask);
  AI.WorkItemIDY = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 10);
  AI.WorkItemIDZ = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 20);
  return AI;
}
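
// Look up the argument info recorded for F. If no entry exists, fall back to
// the fixed-ABI layout when it is enabled, or otherwise assume the function
// has no special inputs (ExternFunctionInfo).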
const AMDGPUFunctionArgInfo &
AMDGPUArgumentUsageInfo::lookupFuncArgInfo(const Function &F) const {
  auto I = ArgInfoMap.find(&F);
  if (I == ArgInfoMap.end()) {
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      return FixedABIFunctionInfo;

    // Without the fixed ABI, we assume no function has special inputs.
    assert(F.isDeclaration());
    return ExternFunctionInfo;
  }

  return I->second;
}