//===--- NVPTX.cpp - Implement NVPTX target feature support ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements NVPTX TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#include "NVPTX.h"
#include "Targets.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"

using namespace clang;
using namespace clang::targets;

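// Table of NVPTX builtins, expanded from the X-macros in BuiltinsNVPTX.def.
// TARGET_BUILTIN entries additionally carry the feature string (e.g. a
// minimum PTX version and SM architecture) required to use the builtin.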
const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS)                                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER)                                    \
  {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE)                               \
  {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
#include "clang/Basic/BuiltinsNVPTX.def"
};

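// PTX operates on virtual registers, so there is no fixed hardware register
// set to report; a single placeholder name is exposed for inline asm.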
const char *const NVPTXTargetInfo::GCCRegNames[] = {"r0"};

NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
                                 const TargetOptions &Opts,
                                 unsigned TargetPointerWidth)
    : TargetInfo(Triple) {
  assert((TargetPointerWidth == 32 || TargetPointerWidth == 64) &&
         "NVPTX only supports 32- and 64-bit modes.");

  PTXVersion = 32;
  for (const StringRef Feature : Opts.FeaturesAsWritten) {
    if (!Feature.startswith("+ptx"))
      continue;
    PTXVersion = llvm::StringSwitch<unsigned>(Feature)
                     .Case("+ptx75", 75)
                     .Case("+ptx74", 74)
                     .Case("+ptx73", 73)
                     .Case("+ptx72", 72)
                     .Case("+ptx71", 71)
                     .Case("+ptx70", 70)
                     .Case("+ptx65", 65)
                     .Case("+ptx64", 64)
                     .Case("+ptx63", 63)
                     .Case("+ptx61", 61)
                     .Case("+ptx60", 60)
                     .Case("+ptx50", 50)
                     .Case("+ptx43", 43)
                     .Case("+ptx42", 42)
                     .Case("+ptx41", 41)
                     .Case("+ptx40", 40)
                     .Case("+ptx32", 32)
                     .Default(32);
  }

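  // Device code supports neither TLS nor VLAs, and address-space-qualified
  // types are mangled according to the NVPTX address space map.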
  TLSSupported = false;
  VLASupported = false;
  AddrSpaceMap = &NVPTXAddrSpaceMap;
  UseAddrSpaceMapMangling = true;

  // Define available target features
  // These must be defined in sorted order!
  NoAsmVariants = true;
  GPU = CudaArch::SM_20;

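  // Select the DataLayout string. In 64-bit mode, "short pointers" keep
  // pointers in address spaces 3, 4 and 5 (shared, const and local) at 32
  // bits, while generic pointers remain 64-bit.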
  if (TargetPointerWidth == 32)
    resetDataLayout("e-p:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
  else if (Opts.NVPTXUseShortPointers)
    resetDataLayout(
        "e-p3:32:32-p4:32:32-p5:32:32-i64:64-i128:128-v16:16-v32:32-n16:32:64");
  else
    resetDataLayout("e-i64:64-i128:128-v16:16-v32:32-n16:32:64");

  // If possible, get a TargetInfo for our host triple, so we can match its
  // types.
  llvm::Triple HostTriple(Opts.HostTriple);
  if (!HostTriple.isNVPTX())
    HostTarget.reset(AllocateTarget(llvm::Triple(Opts.HostTriple), Opts));

  // If no host target, make some guesses about the data layout and return.
  if (!HostTarget) {
    LongWidth = LongAlign = TargetPointerWidth;
    PointerWidth = PointerAlign = TargetPointerWidth;
    switch (TargetPointerWidth) {
    case 32:
      SizeType = TargetInfo::UnsignedInt;
      PtrDiffType = TargetInfo::SignedInt;
      IntPtrType = TargetInfo::SignedInt;
      break;
    case 64:
      SizeType = TargetInfo::UnsignedLong;
      PtrDiffType = TargetInfo::SignedLong;
      IntPtrType = TargetInfo::SignedLong;
      break;
    default:
      llvm_unreachable("TargetPointerWidth must be 32 or 64");
    }
    return;
  }

  // Copy properties from host target.
  PointerWidth = HostTarget->getPointerWidth(/* AddrSpace = */ 0);
  PointerAlign = HostTarget->getPointerAlign(/* AddrSpace = */ 0);
  BoolWidth = HostTarget->getBoolWidth();
  BoolAlign = HostTarget->getBoolAlign();
  IntWidth = HostTarget->getIntWidth();
  IntAlign = HostTarget->getIntAlign();
  HalfWidth = HostTarget->getHalfWidth();
  HalfAlign = HostTarget->getHalfAlign();
  FloatWidth = HostTarget->getFloatWidth();
  FloatAlign = HostTarget->getFloatAlign();
  DoubleWidth = HostTarget->getDoubleWidth();
  DoubleAlign = HostTarget->getDoubleAlign();
  LongWidth = HostTarget->getLongWidth();
  LongAlign = HostTarget->getLongAlign();
  LongLongWidth = HostTarget->getLongLongWidth();
  LongLongAlign = HostTarget->getLongLongAlign();
  MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
  NewAlign = HostTarget->getNewAlign();
  DefaultAlignForAttributeAligned =
      HostTarget->getDefaultAlignForAttributeAligned();
  SizeType = HostTarget->getSizeType();
  IntMaxType = HostTarget->getIntMaxType();
  PtrDiffType = HostTarget->getPtrDiffType(/* AddrSpace = */ 0);
  IntPtrType = HostTarget->getIntPtrType();
  WCharType = HostTarget->getWCharType();
  WIntType = HostTarget->getWIntType();
  Char16Type = HostTarget->getChar16Type();
  Char32Type = HostTarget->getChar32Type();
  Int64Type = HostTarget->getInt64Type();
  SigAtomicType = HostTarget->getSigAtomicType();
  ProcessIDType = HostTarget->getProcessIDType();

  UseBitFieldTypeAlignment = HostTarget->useBitFieldTypeAlignment();
  UseZeroLengthBitfieldAlignment = HostTarget->useZeroLengthBitfieldAlignment();
  UseExplicitBitFieldAlignment = HostTarget->useExplicitBitFieldAlignment();
  ZeroLengthBitfieldBoundary = HostTarget->getZeroLengthBitfieldBoundary();

  // This is a bit of a lie, but it controls __GCC_ATOMIC_XXX_LOCK_FREE, and
  // we need those macros to be identical on host and device, because (among
  // other things) they affect which standard library classes are defined, and
  // we need all classes to be defined on both the host and device.
  MaxAtomicInlineWidth = HostTarget->getMaxAtomicInlineWidth();

  // Properties intentionally not copied from host:
  // - LargeArrayMinWidth, LargeArrayAlign: Not visible across the
  //   host/device boundary.
  // - SuitableAlign: Not visible across the host/device boundary, and may
  //   correctly be different on host/device, e.g. if host has wider vector
  //   types than device.
  // - LongDoubleWidth, LongDoubleAlign: nvptx's long double type is the same
  //   as its double type, but that's not necessarily true on the host.
  //   TODO: nvcc emits a warning when using long double on device; we should
  //   do the same.
}

ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
  return llvm::makeArrayRef(GCCRegNames);
}

bool NVPTXTargetInfo::hasFeature(StringRef Feature) const {
  return llvm::StringSwitch<bool>(Feature)
      .Cases("ptx", "nvptx", true)
      .Default(false);
}

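// __PTX__ and __NVPTX__ are always predefined for this target. __CUDA_ARCH__
// is only defined for device-side CUDA/OpenMP compilation and encodes the SM
// version as major * 100 + minor * 10 (e.g. sm_75 -> 750).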
void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
                                       MacroBuilder &Builder) const {
  Builder.defineMacro("__PTX__");
  Builder.defineMacro("__NVPTX__");
  if (Opts.CUDAIsDevice || Opts.OpenMPIsDevice) {
    // Set __CUDA_ARCH__ for the GPU specified.
    std::string CUDAArchCode = [this] {
      switch (GPU) {
      case CudaArch::GFX600:
      case CudaArch::GFX601:
      case CudaArch::GFX602:
      case CudaArch::GFX700:
      case CudaArch::GFX701:
      case CudaArch::GFX702:
      case CudaArch::GFX703:
      case CudaArch::GFX704:
      case CudaArch::GFX705:
      case CudaArch::GFX801:
      case CudaArch::GFX802:
      case CudaArch::GFX803:
      case CudaArch::GFX805:
      case CudaArch::GFX810:
      case CudaArch::GFX900:
      case CudaArch::GFX902:
      case CudaArch::GFX904:
      case CudaArch::GFX906:
      case CudaArch::GFX908:
      case CudaArch::GFX909:
      case CudaArch::GFX90a:
      case CudaArch::GFX90c:
      case CudaArch::GFX940:
      case CudaArch::GFX1010:
      case CudaArch::GFX1011:
      case CudaArch::GFX1012:
      case CudaArch::GFX1013:
      case CudaArch::GFX1030:
      case CudaArch::GFX1031:
      case CudaArch::GFX1032:
      case CudaArch::GFX1033:
      case CudaArch::GFX1034:
      case CudaArch::GFX1035:
      case CudaArch::GFX1036:
      case CudaArch::GFX1100:
      case CudaArch::GFX1101:
      case CudaArch::GFX1102:
      case CudaArch::GFX1103:
      case CudaArch::Generic:
      case CudaArch::LAST:
        break;
      case CudaArch::UNUSED:
      case CudaArch::UNKNOWN:
        assert(false && "No GPU arch when compiling CUDA device code.");
        return "";
      case CudaArch::SM_20:
        return "200";
      case CudaArch::SM_21:
        return "210";
      case CudaArch::SM_30:
        return "300";
      case CudaArch::SM_32:
        return "320";
      case CudaArch::SM_35:
        return "350";
      case CudaArch::SM_37:
        return "370";
      case CudaArch::SM_50:
        return "500";
      case CudaArch::SM_52:
        return "520";
      case CudaArch::SM_53:
        return "530";
      case CudaArch::SM_60:
        return "600";
      case CudaArch::SM_61:
        return "610";
      case CudaArch::SM_62:
        return "620";
      case CudaArch::SM_70:
        return "700";
      case CudaArch::SM_72:
        return "720";
      case CudaArch::SM_75:
        return "750";
      case CudaArch::SM_80:
        return "800";
      case CudaArch::SM_86:
        return "860";
      }
      llvm_unreachable("unhandled CudaArch");
    }();
    Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
  }
}

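// Report the NVPTX builtin table; its size spans the IDs between the generic
// builtins and NVPTX::LastTSBuiltin.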
ArrayRef<Builtin::Info> NVPTXTargetInfo::getTargetBuiltins() const {
  return llvm::makeArrayRef(BuiltinInfo, clang::NVPTX::LastTSBuiltin -
                                             Builtin::FirstTSBuiltin);
}