//===--- AMDGPU.h - Declare AMDGPU target feature support -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares AMDGPU TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H

#include "clang/Basic/TargetID.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetParser.h"

namespace clang {
namespace targets {

class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
  static const Builtin::Info BuiltinInfo[];
  static const char *const GCCRegNames[];

  enum AddrSpace {
    Generic = 0,
    Global = 1,
    Local = 3,
    Constant = 4,
    Private = 5
  };
  static const LangASMap AMDGPUDefIsGenMap;
  static const LangASMap AMDGPUDefIsPrivMap;

  llvm::AMDGPU::GPUKind GPUKind;
  unsigned GPUFeatures;
  unsigned WavefrontSize;

  /// The target ID is the device name followed by optional feature names,
  /// each postfixed by a plus or minus sign and delimited by colons,
  /// e.g. gfx908:xnack+:sramecc-.
  /// If the target ID contains feature+, map it to true.
  /// If the target ID contains feature-, map it to false.
  /// If the target ID does not contain a feature (default), do not map it.
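  /// For example, for target ID gfx908:xnack+:sramecc- this map holds
  /// xnack -> true and sramecc -> false.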
  llvm::StringMap<bool> OffloadArchFeatures;
  std::string TargetID;

  bool hasFP64() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FP64);
  }

  /// Has fast fma f32
  bool hasFastFMAF() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_FMA_F32);
  }

  /// Has fast fma f64
  bool hasFastFMA() const {
    return getTriple().getArch() == llvm::Triple::amdgcn;
  }

  bool hasFMAF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_FMA);
  }

  bool hasFullRateDenormalsF32() const {
    return !!(GPUFeatures & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32);
  }

  bool hasLDEXPF() const {
    return getTriple().getArch() == llvm::Triple::amdgcn ||
           !!(GPUFeatures & llvm::AMDGPU::FEATURE_LDEXP);
  }

  static bool isAMDGCN(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::amdgcn;
  }

  static bool isR600(const llvm::Triple &TT) {
    return TT.getArch() == llvm::Triple::r600;
  }

public:
  AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts);

  void setAddressSpaceMap(bool DefaultIsPrivate);

  void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;

  uint64_t getPointerWidthV(unsigned AddrSpace) const override {
    if (isR600(getTriple()))
      return 32;

    if (AddrSpace == Private || AddrSpace == Local)
      return 32;

    return 64;
  }

  uint64_t getPointerAlignV(unsigned AddrSpace) const override {
    return getPointerWidthV(AddrSpace);
  }

  uint64_t getMaxPointerWidth() const override {
    return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
  }

  const char *getClobbers() const override { return ""; }

  ArrayRef<const char *> getGCCRegNames() const override;

  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
    return None;
  }

  /// Accepted register names: (n, m is unsigned integer, n < m)
  /// v
  /// s
  /// a
  /// {vn}, {v[n]}
  /// {sn}, {s[n]}
  /// {an}, {a[n]}
  /// {S} , where S is a special register name
  /// {v[n:m]}
  /// {s[n:m]}
  /// {a[n:m]}
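  /// For example, "v", "{v0}", "{s[0:1]}" and "{exec}" are accepted, whereas
  /// "{s[1:0]}" is rejected because n must be less than m.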
  bool validateAsmConstraint(const char *&Name,
                             TargetInfo::ConstraintInfo &Info) const override {
    static const ::llvm::StringSet<> SpecialRegs({
        "exec", "vcc", "flat_scratch", "m0", "scc", "tba", "tma",
        "flat_scratch_lo", "flat_scratch_hi", "vcc_lo", "vcc_hi", "exec_lo",
        "exec_hi", "tma_lo", "tma_hi", "tba_lo", "tba_hi",
    });

    switch (*Name) {
    case 'I':
      Info.setRequiresImmediate(-16, 64);
      return true;
    case 'J':
      Info.setRequiresImmediate(-32768, 32767);
      return true;
    case 'A':
    case 'B':
    case 'C':
      Info.setRequiresImmediate();
      return true;
    default:
      break;
    }

    StringRef S(Name);

    if (S == "DA" || S == "DB") {
      Name++;
      Info.setRequiresImmediate();
      return true;
    }

    bool HasLeftParen = false;
    if (S.front() == '{') {
      HasLeftParen = true;
      S = S.drop_front();
    }
    if (S.empty())
      return false;
    if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
      if (!HasLeftParen)
        return false;
      auto E = S.find('}');
      if (!SpecialRegs.count(S.substr(0, E)))
        return false;
      S = S.drop_front(E + 1);
      if (!S.empty())
        return false;
      // Found {S} where S is a special register.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    S = S.drop_front();
    if (!HasLeftParen) {
      if (!S.empty())
        return false;
      // Found s, v or a.
      Info.setAllowsRegister();
      Name = S.data() - 1;
      return true;
    }
    bool HasLeftBracket = false;
    if (!S.empty() && S.front() == '[') {
      HasLeftBracket = true;
      S = S.drop_front();
    }
    unsigned long long N;
    if (S.empty() || consumeUnsignedInteger(S, 10, N))
      return false;
    if (!S.empty() && S.front() == ':') {
      if (!HasLeftBracket)
        return false;
      S = S.drop_front();
      unsigned long long M;
      if (consumeUnsignedInteger(S, 10, M) || N >= M)
        return false;
    }
    if (HasLeftBracket) {
      if (S.empty() || S.front() != ']')
        return false;
      S = S.drop_front();
    }
    if (S.empty() || S.front() != '}')
      return false;
    S = S.drop_front();
    if (!S.empty())
      return false;
    // Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
    // or {a[n:m]}.
    Info.setAllowsRegister();
    Name = S.data() - 1;
    return true;
  }

  // \p Constraint will be left pointing at the last character of
  // the constraint. In practice, it won't be changed unless the
  // constraint is longer than one character.
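  // For example, "DA" is converted to "^DA", while a register constraint
  // such as "{v0}" is returned unchanged.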
  std::string convertConstraint(const char *&Constraint) const override {
    StringRef S(Constraint);
    if (S == "DA" || S == "DB") {
      return std::string("^") + std::string(Constraint++, 2);
    }

    const char *Begin = Constraint;
    TargetInfo::ConstraintInfo Info("", "");
    if (validateAsmConstraint(Constraint, Info))
      return std::string(Begin).substr(0, Constraint - Begin + 1);

    Constraint = Begin;
    return std::string(1, *Constraint);
  }

  bool
  initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
                 StringRef CPU,
                 const std::vector<std::string> &FeatureVec) const override;

  ArrayRef<Builtin::Info> getTargetBuiltins() const override;

  bool useFP16ConversionIntrinsics() const override { return false; }

  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override;

  BuiltinVaListKind getBuiltinVaListKind() const override {
    return TargetInfo::CharPtrBuiltinVaList;
  }

  bool isValidCPUName(StringRef Name) const override {
    if (getTriple().getArch() == llvm::Triple::amdgcn)
      return llvm::AMDGPU::parseArchAMDGCN(Name) != llvm::AMDGPU::GK_NONE;
    return llvm::AMDGPU::parseArchR600(Name) != llvm::AMDGPU::GK_NONE;
  }

  void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;

  bool setCPU(const std::string &Name) override {
    if (getTriple().getArch() == llvm::Triple::amdgcn) {
      GPUKind = llvm::AMDGPU::parseArchAMDGCN(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrAMDGCN(GPUKind);
    } else {
      GPUKind = llvm::AMDGPU::parseArchR600(Name);
      GPUFeatures = llvm::AMDGPU::getArchAttrR600(GPUKind);
    }

    return GPUKind != llvm::AMDGPU::GK_NONE;
  }

  void setSupportedOpenCLOpts() override {
    auto &Opts = getSupportedOpenCLOpts();
    Opts["cl_clang_storage_class_specifiers"] = true;
    Opts["__cl_clang_variadic_functions"] = true;
    Opts["__cl_clang_function_pointers"] = true;
    Opts["__cl_clang_non_portable_kernel_param_types"] = true;
    Opts["__cl_clang_bitfields"] = true;

    bool IsAMDGCN = isAMDGCN(getTriple());

    Opts["cl_khr_fp64"] = hasFP64();
    Opts["__opencl_c_fp64"] = hasFP64();

    if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
      Opts["cl_khr_byte_addressable_store"] = true;
      Opts["cl_khr_global_int32_base_atomics"] = true;
      Opts["cl_khr_global_int32_extended_atomics"] = true;
      Opts["cl_khr_local_int32_base_atomics"] = true;
      Opts["cl_khr_local_int32_extended_atomics"] = true;
    }

    if (IsAMDGCN) {
      Opts["cl_khr_fp16"] = true;
      Opts["cl_khr_int64_base_atomics"] = true;
      Opts["cl_khr_int64_extended_atomics"] = true;
      Opts["cl_khr_mipmap_image"] = true;
      Opts["cl_khr_mipmap_image_writes"] = true;
      Opts["cl_khr_subgroups"] = true;
      Opts["cl_amd_media_ops"] = true;
      Opts["cl_amd_media_ops2"] = true;

      Opts["__opencl_c_images"] = true;
      Opts["__opencl_c_3d_image_writes"] = true;
      Opts["cl_khr_3d_image_writes"] = true;
    }
  }

  LangAS getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const override {
    switch (TK) {
    case OCLTK_Image:
      return LangAS::opencl_constant;

    case OCLTK_ClkEvent:
    case OCLTK_Queue:
    case OCLTK_ReserveID:
      return LangAS::opencl_global;

    default:
      return TargetInfo::getOpenCLTypeAddrSpace(TK);
    }
  }

  LangAS getOpenCLBuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::opencl_generic;
    case 1:
      return LangAS::opencl_global;
    case 3:
      return LangAS::opencl_local;
    case 4:
      return LangAS::opencl_constant;
    case 5:
      return LangAS::opencl_private;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
    switch (AS) {
    case 0:
      return LangAS::Default;
    case 1:
      return LangAS::cuda_device;
    case 3:
      return LangAS::cuda_shared;
    case 4:
      return LangAS::cuda_constant;
    default:
      return getLangASFromTargetAS(AS);
    }
  }

  llvm::Optional<LangAS> getConstantAddressSpace() const override {
    return getLangASFromTargetAS(Constant);
  }

  const llvm::omp::GV &getGridValue() const override {
    switch (WavefrontSize) {
    case 32:
      return llvm::omp::getAMDGPUGridValues<32>();
    case 64:
      return llvm::omp::getAMDGPUGridValues<64>();
    default:
      llvm_unreachable("getGridValue not implemented for this wavesize");
    }
  }

  /// \returns Target specific vtbl ptr address space.
  unsigned getVtblPtrAddressSpace() const override {
    return static_cast<unsigned>(Constant);
  }

  /// \returns If a target requires an address within a target specific address
  /// space \p AddressSpace to be converted in order to be used, then return the
  /// corresponding target specific DWARF address space.
  ///
  /// \returns Otherwise return None and no conversion will be emitted in the
  /// DWARF.
  Optional<unsigned>
  getDWARFAddressSpace(unsigned AddressSpace) const override {
    const unsigned DWARF_Private = 1;
    const unsigned DWARF_Local = 2;
    if (AddressSpace == Private) {
      return DWARF_Private;
    } else if (AddressSpace == Local) {
      return DWARF_Local;
    } else {
      return None;
    }
  }

  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
    switch (CC) {
    default:
      return CCCR_Warning;
    case CC_C:
    case CC_OpenCLKernel:
    case CC_AMDGPUKernelCall:
      return CCCR_OK;
    }
  }

  // In the amdgcn target, a null pointer in the global, constant, and generic
  // address spaces has value 0, but in the private and local address spaces
  // it has value ~0.
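  // (~0 is all bits set; private and local pointers are 32 bits wide, so the
  // null value in those address spaces is 0xFFFFFFFF.)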
  uint64_t getNullPointerValue(LangAS AS) const override {
    // FIXME: Also should handle region.
    return (AS == LangAS::opencl_local || AS == LangAS::opencl_private)
      ? ~0 : 0;
  }

  void setAuxTarget(const TargetInfo *Aux) override;

  bool hasBitIntType() const override { return true; }

  // Record offload arch features since they are needed for defining the
  // pre-defined macros.
  bool handleTargetFeatures(std::vector<std::string> &Features,
                            DiagnosticsEngine &Diags) override {
    auto TargetIDFeatures =
        getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
    for (const auto &F : Features) {
      assert(F.front() == '+' || F.front() == '-');
      if (F == "+wavefrontsize64")
        WavefrontSize = 64;
      bool IsOn = F.front() == '+';
      StringRef Name = StringRef(F).drop_front();
      if (!llvm::is_contained(TargetIDFeatures, Name))
        continue;
      assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
      OffloadArchFeatures[Name] = IsOn;
    }
    return true;
  }

  Optional<std::string> getTargetID() const override {
    if (!isAMDGCN(getTriple()))
      return llvm::None;
    // When -target-cpu is not set, we assume generic code that is valid for
    // all GPUs and use an empty string as the target ID to represent that.
    if (GPUKind == llvm::AMDGPU::GK_NONE)
      return std::string("");
    return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
                                OffloadArchFeatures);
  }
};

} // namespace targets
} // namespace clang

#endif // LLVM_CLANG_LIB_BASIC_TARGETS_AMDGPU_H