//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AMDGPU TargetInfo objects.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H

#include "clang/Basic/TargetID.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetParser.h"
namespace clang {
namespace targets {

class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {

  static const Builtin::Info BuiltinInfo[];
  static const char *const GCCRegNames[];

  enum AddrSpace {
    Generic = 0,
    Global = 1,
    Local = 3,
    Constant = 4,
    Private = 5
  };
  static const LangASMap AMDGPUDefIsGenMap;
  static const LangASMap AMDGPUDefIsPrivMap;

  llvm::AMDGPU::GPUKind GPUKind;
  unsigned GPUFeatures;
  unsigned WavefrontSize;
  /// Target ID is the device name followed by optional feature names, each
  /// postfixed by a plus or minus sign and delimited by colons,
  /// e.g. gfx908:xnack+:sramecc-.
  /// If the target ID contains feature+, map it to true.
  /// If the target ID contains feature-, map it to false.
  /// If the target ID does not contain a feature (default), do not map it.
  llvm::StringMap<bool> OffloadArchFeatures;
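  // Illustrative example (not part of the original header): for the target ID
  // "gfx908:xnack+:sramecc-", OffloadArchFeatures holds
  //   { "xnack" -> true, "sramecc" -> false },
  // while a bare "gfx908" leaves the map empty.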
  bool hasFP64() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
  }

  bool hasFastFMAF() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
  }

  bool hasFastFMA() const {
    return getTriple().getArch() == llvm::Triple::amdgcn;
  }

  bool hasFMAF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
  }

  bool hasFullRateDenormalsF32() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
  }

  bool hasLDEXPF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
  }

  static bool isAMDGCN(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::amdgcn;
  }

  static bool isR600(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::r600;
  }
public:
  AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);

  void setAddressSpaceMap(bool DefaultIsPrivate);

  void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;
  uint64_t getPointerWidthV(unsigned AddrSpace) const override {
    if (isR600(getTriple()))
      return 32;

    if (AddrSpace == Private || AddrSpace == Local)
      return 32;

    return 64;
  }

  uint64_t getPointerAlignV(unsigned AddrSpace) const override {
    return getPointerWidthV(AddrSpace);
  }
  uint64_t getMaxPointerWidth() const override {
    return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
  }

  const char *getClobbers() const override { return ""; }
  ArrayRef<const char *> getGCCRegNames() const override;

  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
    return None;
  }
  /// Accepted register names (n, m are unsigned integers, n < m):
  ///   v, s, a
  ///   {vn}, {sn}, {an}
  ///   {v[n]}, {s[n]}, {a[n]}
  ///   {v[n:m]}, {s[n:m]}, {a[n:m]}
  ///   {S}, where S is a special register name
  bool validateAsmConstraint(const char *&Name,
                             TargetInfo::ConstraintInfo &Info) const override {
    static const ::llvm::StringSet<> SpecialRegs({
        "exec", "vcc", "flat_scratch", "m0", "scc", "tba", "tma",
        "flat_scratch_lo", "flat_scratch_hi", "vcc_lo", "vcc_hi", "exec_lo",
        "exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
    });

    switch (*Name) {
    case 'I':
      Info.setRequiresImmediate(-16, 64);
      return true;
    case 'J':
      Info.setRequiresImmediate(-32768, 32767);
      return true;
    case 'A':
    case 'B':
    case 'C':
      Info.setRequiresImmediate();
      return true;
    default:
      break;
    }

    StringRef S(Name);

    if (S == "DA" || S == "DB") {
      Name++;
      Info.setRequiresImmediate();
      return true;
    }

    bool HasLeftParen = false;
    if (S.front() == '{') {
      HasLeftParen = true;
      S = S.drop_front();
    }
    if (S.empty())
      return false;
    if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
      if (!HasLeftParen)
        return false;
      auto E = S.find('}');
      if (!SpecialRegs.count(S.substr(0, E)))
        return false;
      S = S.drop_front(E + 1);
      if (!S.empty())
        return false;
      // Found {S} where S is a special register.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    S = S.drop_front();
    if (!HasLeftParen) {
      if (!S.empty())
        return false;
      // Found a bare v, s or a.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    bool HasLeftBracket = false;
    if (!S.empty() && S.front() == '[') {
      HasLeftBracket = true;
      S = S.drop_front();
    }
    unsigned long long N;
    if (S.empty() || consumeUnsignedInteger(S, 10, N))
      return false;
    if (!S.empty() && S.front() == ':') {
      if (!HasLeftBracket)
        return false;
      S = S.drop_front();
      unsigned long long M;
      if (consumeUnsignedInteger(S, 10, M) || N >= M)
        return false;
    }
    if (HasLeftBracket) {
      if (S.empty() || S.front() != ']')
        return false;
      S = S.drop_front();
    }
    if (S.empty() || S.front() != '}')
      return false;
    S = S.drop_front();
    if (!S.empty())
      return false;
    // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
    // or {a[n:m]}.
    Info.setAllowsRegister();
    Name = S.data() - 1;
    return true;
  }
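  // Illustrative usage (not part of the original header): these constraints
  // appear in user inline assembly for AMDGPU, e.g.
  //   int Out, In;
  //   __asm__("v_mov_b32 %0, %1" : "=v"(Out) : "{v1}"(In));
  // where "v" selects any VGPR and "{v1}" pins the operand to register v1;
  // both spellings are accepted by validateAsmConstraint above.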
  // \p Constraint will be left pointing at the last character of
  // the constraint. In practice, it won't be changed unless the
  // constraint is longer than one character.
  std::string convertConstraint(const char *&Constraint) const override {
    StringRef S(Constraint);
    if (S == "DA" || S == "DB") {
      return std::string("^") + std::string(Constraint++, 2);
    }

    const char *Begin = Constraint;
    TargetInfo::ConstraintInfo Info("", "");
    if (validateAsmConstraint(Constraint, Info))
      return std::string(Begin).substr(0, Constraint - Begin + 1);

    Constraint = Begin;
    return std::string(1, *Constraint);
  }
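  // Illustrative example (not part of the original header): convertConstraint
  // rewrites the two-letter constraints "DA"/"DB" as "^DA"/"^DB" so that later
  // passes treat them as a single constraint, while register constraints such
  // as "{v[0:1]}" are returned unchanged.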
  bool
  initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
                 StringRef CPU,
                 const std::vector<std::string> &FeatureVec) const override;
  ArrayRef<Builtin::Info> getTargetBuiltins() const override;

  bool useFP16ConversionIntrinsics() const override { return false; }
  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override;

  BuiltinVaListKind getBuiltinVaListKind() const override {
    return TargetInfo::CharPtrBuiltinVaList;
  }
  bool isValidCPUName(StringRef Name) const override {
    if (getTriple().getArch() == llvm::Triple::amdgcn)
      return llvm::AMDGPU::parseArchAMDGCN(Name) != llvm::AMDGPU::GK_NONE;
    return llvm::AMDGPU::parseArchR600(Name) != llvm::AMDGPU::GK_NONE;
  }

  void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
  bool setCPU(const std::string &Name) override {
    if (getTriple().getArch() == llvm::Triple::amdgcn) {
      GPUKind = llvm::AMDGPU::parseArchAMDGCN(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(GPUKind);
    } else {
      GPUKind = llvm::AMDGPU::parseArchR600(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrR600(GPUKind);
    }

    return GPUKind != llvm::AMDGPU::GK_NONE;
  }
  void setSupportedOpenCLOpts() override {
    auto &Opts = getSupportedOpenCLOpts();
    Opts["cl_clang_storage_class_specifiers"] = true;
    Opts["__cl_clang_variadic_functions"] = true;
    Opts["__cl_clang_function_pointers"] = true;
    Opts["__cl_clang_non_portable_kernel_param_types"] = true;
    Opts["__cl_clang_bitfields"] = true;

    bool IsAMDGCN = isAMDGCN(getTriple());

    Opts["cl_khr_fp64"] = hasFP64();
    Opts["__opencl_c_fp64"] = hasFP64();

    if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
      Opts["cl_khr_byte_addressable_store"] = true;
      Opts["cl_khr_global_int32_base_atomics"] = true;
      Opts["cl_khr_global_int32_extended_atomics"] = true;
      Opts["cl_khr_local_int32_base_atomics"] = true;
      Opts["cl_khr_local_int32_extended_atomics"] = true;
    }

    if (IsAMDGCN) {
      Opts["cl_khr_fp16"] = true;
      Opts["cl_khr_int64_base_atomics"] = true;
      Opts["cl_khr_int64_extended_atomics"] = true;
      Opts["cl_khr_mipmap_image"] = true;
      Opts["cl_khr_mipmap_image_writes"] = true;
      Opts["cl_khr_subgroups"] = true;
      Opts["cl_amd_media_ops"] = true;
      Opts["cl_amd_media_ops2"] = true;

      Opts["__opencl_c_images"] = true;
      Opts["__opencl_c_3d_image_writes"] = true;
      Opts["cl_khr_3d_image_writes"] = true;
    }
  }
  LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const override {
    switch (TK) {
    case OCLTK_Image:
      return LangAS::opencl_constant;

    case OCLTK_ClkEvent:
    case OCLTK_Queue:
    case OCLTK_ReserveID:
      return LangAS::opencl_global;

    default:
      return TargetInfo::getOpenCLTypeAddrSpace(TK);
    }
  }
  LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::opencl_generic;
    case 1:
      return LangAS::opencl_global;
    case 3:
      return LangAS::opencl_local;
    case 4:
      return LangAS::opencl_constant;
    case 5:
      return LangAS::opencl_private;
    default:
      return getLangASFromTargetAS(AS);
    }
  }
  LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::Default;
    case 1:
      return LangAS::cuda_device;
    case 3:
      return LangAS::cuda_shared;
    case 4:
      return LangAS::cuda_constant;
    default:
      return getLangASFromTargetAS(AS);
    }
  }
  llvm::Optional<LangAS> getConstantAddressSpace() const override {
    return getLangASFromTargetAS(Constant);
  }
  const llvm::omp::GV &getGridValue() const override {
    switch (WavefrontSize) {
    case 32:
      return llvm::omp::getAMDGPUGridValues<32>();
    case 64:
      return llvm::omp::getAMDGPUGridValues<64>();
    default:
      llvm_unreachable("getGridValue not implemented for this wavesize");
    }
  }
  /// \returns Target specific vtbl ptr address space.
  unsigned getVtblPtrAddressSpace() const override {
    return static_cast<unsigned>(Constant);
  }
  /// \returns If a target requires an address within a target specific address
  /// space \p AddressSpace to be converted in order to be used, then return
  /// the corresponding target specific DWARF address space.
  ///
  /// \returns Otherwise return None and no conversion will be emitted in the
  /// DWARF.
  Optional<unsigned>
  getDWARFAddressSpace(unsigned AddressSpace) const override {
    const unsigned DWARF_Private = 1;
    const unsigned DWARF_Local = 2;
    if (AddressSpace == Private) {
      return DWARF_Private;
    } else if (AddressSpace == Local) {
      return DWARF_Local;
    } else {
      return None;
    }
  }
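  // Illustrative mapping (not part of the original header): a pointer in the
  // private address space (5) is described to the debugger as DWARF address
  // space 1, local (3) as DWARF address space 2, and everything else is left
  // unconverted (None).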
  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
    switch (CC) {
    default:
      return CCCR_Warning;
    case CC_C:
    case CC_OpenCLKernel:
    case CC_AMDGPUKernelCall:
      return CCCR_OK;
    }
  }
  // In the amdgcn target, the null pointer in the global, constant, and
  // generic address spaces has value 0, but in the private and local address
  // spaces it has value ~0.
  uint64_t getNullPointerValue(LangAS AS) const override {
    // FIXME: Also should handle region.
    return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
               ? ~0
               : 0;
  }
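  // Illustrative consequence (not part of the original header): a null
  // __private or __local pointer is lowered to the all-ones bit pattern
  // (e.g. 0xFFFFFFFF for a 32-bit pointer), while a null __global, __constant,
  // or generic pointer keeps the bit pattern 0.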
  void setAuxTarget(const TargetInfo *Aux) override;

  bool hasBitIntType() const override { return true; }
  // Record offload arch features since they are needed for defining the
  // predefined macros.
  bool handleTargetFeatures(std::vector<std::string> &Features,
                            DiagnosticsEngine &Diags) override {
    auto TargetIDFeatures =
        getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
    for (const auto &F : Features) {
      assert(F.front() == '+' || F.front() == '-');
      if (F == "+wavefrontsize64")
        WavefrontSize = 64;
      bool IsOn = F.front() == '+';
      StringRef Name = StringRef(F).drop_front();
      if (!llvm::is_contained(TargetIDFeatures, Name))
        continue;
      assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
      OffloadArchFeatures[Name] = IsOn;
    }
    return true;
  }
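  // Illustrative example (not part of the original header): for a Features
  // vector of {"+wavefrontsize64", "+xnack", "-sramecc"}, this records
  // WavefrontSize = 64 and OffloadArchFeatures = { "xnack" -> true,
  // "sramecc" -> false }; "wavefrontsize64" is not a target ID feature, so it
  // is skipped rather than added to the map.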
  Optional<std::string> getTargetID() const override {
    if (!isAMDGCN(getTriple()))
      return llvm::None;
    // When -target-cpu is not set, we assume generic code that is valid for
    // all GPUs and use an empty string as the target ID to represent that.
    if (GPUKind == llvm::AMDGPU::GK_NONE)
      return std::string("");
    return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
                                OffloadArchFeatures);
  }
};

} // namespace targets
} // namespace clang

#endif // LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H