[Alignment][NFC] Use Align with TargetLowering::setMinFunctionAlignment
lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp
//===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMTargetStreamer class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/TargetParser.h"

using namespace llvm;

//
// ARMTargetStreamer Implementation
//

ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
    : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}

ARMTargetStreamer::~ARMTargetStreamer() = default;

// The constant pool handling is shared by all ARMTargetStreamer
// implementations.
const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
                                                      SMLoc Loc) {
  return ConstantPools->addEntry(Streamer, Expr, 4, Loc);
}

void ARMTargetStreamer::emitCurrentConstantPool() {
  ConstantPools->emitForCurrentSection(Streamer);
  ConstantPools->clearCacheForCurrentSection(Streamer);
}
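
// Illustration (typical usage, assumed rather than stated here): the ARM
// assembly parser calls addConstantPoolEntry() when it expands the
// "ldr rN, =expr" pseudo-instruction, and emitCurrentConstantPool() when it
// handles an explicit ".ltorg" directive, so each section's literal pool is
// flushed at a point the user controls.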

// finish() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::finish() { ConstantPools->emitAll(Streamer); }

// reset() - Reset any state
void ARMTargetStreamer::reset() {}

void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {
  unsigned Size;
  char Buffer[4];
  const bool LittleEndian =
      getStreamer().getContext().getAsmInfo()->isLittleEndian();

  switch (Suffix) {
  case '\0':
    Size = 4;
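
    // Worked example (illustrative): Inst == 0xAABBCCDD is emitted as the
    // byte stream DD CC BB AA on a little-endian target and AA BB CC DD on
    // a big-endian one, i.e. as a single 32-bit word in target byte order.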

    for (unsigned II = 0, IE = Size; II != IE; II++) {
      const unsigned I = LittleEndian ? (Size - II - 1) : II;
      Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
    }

    break;
  case 'n':
  case 'w':
    Size = (Suffix == 'n' ? 2 : 4);

    // Thumb wide instructions are emitted as a pair of 16-bit words of the
    // appropriate endianness.
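    // Worked example (illustrative): for Inst == 0xAABBCCDD with the 'w'
    // suffix, the halfwords 0xAABB and 0xCCDD are emitted in order, each in
    // target byte order: BB AA DD CC on little-endian, AA BB CC DD on
    // big-endian.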
    for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
      const unsigned I0 = LittleEndian ? II + 0 : II + 1;
      const unsigned I1 = LittleEndian ? II + 1 : II + 0;
      Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
      Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
    }

    break;
  default:
    llvm_unreachable("Invalid Suffix");
  }
  getStreamer().EmitBytes(StringRef(Buffer, Size));
}

// The remaining callbacks should be handled separately by each
// streamer.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
                                  int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
                                    bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
                                      const SmallVectorImpl<uint8_t> &Opcodes) {
}
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
                                          StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
                                             unsigned IntValue,
                                             StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(unsigned ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(unsigned FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void
ARMTargetStreamer::AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}

static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
  if (STI.getCPU() == "xscale")
    return ARMBuildAttrs::v5TEJ;

  if (STI.hasFeature(ARM::HasV8Ops)) {
    if (STI.hasFeature(ARM::FeatureRClass))
      return ARMBuildAttrs::v8_R;
    return ARMBuildAttrs::v8_A;
  } else if (STI.hasFeature(ARM::HasV8_1MMainlineOps))
    return ARMBuildAttrs::v8_1_M_Main;
  else if (STI.hasFeature(ARM::HasV8MMainlineOps))
    return ARMBuildAttrs::v8_M_Main;
  else if (STI.hasFeature(ARM::HasV7Ops)) {
    if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
      return ARMBuildAttrs::v7E_M;
    return ARMBuildAttrs::v7;
  } else if (STI.hasFeature(ARM::HasV6T2Ops))
    return ARMBuildAttrs::v6T2;
  else if (STI.hasFeature(ARM::HasV8MBaselineOps))
    return ARMBuildAttrs::v8_M_Base;
  else if (STI.hasFeature(ARM::HasV6MOps))
    return ARMBuildAttrs::v6S_M;
  else if (STI.hasFeature(ARM::HasV6Ops))
    return ARMBuildAttrs::v6;
  else if (STI.hasFeature(ARM::HasV5TEOps))
    return ARMBuildAttrs::v5TE;
  else if (STI.hasFeature(ARM::HasV5TOps))
    return ARMBuildAttrs::v5T;
  else if (STI.hasFeature(ARM::HasV4TOps))
    return ARMBuildAttrs::v4T;
  else
    return ARMBuildAttrs::v4;
}
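
// Example (derived from the ladder above): a Cortex-M4-class subtarget with
// HasV7Ops, FeatureMClass and FeatureDSP maps to Tag_CPU_arch = v7E_M, while
// a v7 core without both M-profile and DSP falls through to plain v7.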

static bool isV8M(const MCSubtargetInfo &STI) {
  // Note that v8M Baseline is a subset of v6T2!
  return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
          !STI.hasFeature(ARM::HasV6T2Ops)) ||
         STI.hasFeature(ARM::HasV8MMainlineOps);
}
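
// In the feature model (assumed from the comment above), HasV6T2Ops implies
// HasV8MBaselineOps, so the !HasV6T2Ops check keeps v6T2-and-later cores from
// being misreported as v8-M Baseline.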

/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI, or any source-language choices.
void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
  switchVendor("aeabi");

  const StringRef CPUString = STI.getCPU();
  if (!CPUString.empty() && !CPUString.startswith("generic")) {
    // FIXME: remove krait check when GNU tools support krait cpu
    if (STI.hasFeature(ARM::ProcKrait)) {
      emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
      // We consider krait as a "cortex-a9" + hwdiv CPU
      // Enable hwdiv through ".arch_extension idiv"
      if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
          STI.hasFeature(ARM::FeatureHWDivARM))
        emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
    } else {
      emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
    }
  }

  emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));

  if (STI.hasFeature(ARM::FeatureAClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::ApplicationProfile);
  } else if (STI.hasFeature(ARM::FeatureRClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::RealTimeProfile);
  } else if (STI.hasFeature(ARM::FeatureMClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::MicroControllerProfile);
  }

  emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
                                                ? ARMBuildAttrs::Not_Allowed
                                                : ARMBuildAttrs::Allowed);

  if (isV8M(STI)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumbDerived);
  } else if (STI.hasFeature(ARM::FeatureThumb2)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumb32);
  } else if (STI.hasFeature(ARM::HasV4TOps)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
  }

  if (STI.hasFeature(ARM::FeatureNEON)) {
    /* NEON is not exactly a VFP architecture, but GAS emits one of
     * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
    if (STI.hasFeature(ARM::FeatureFPARMv8)) {
      if (STI.hasFeature(ARM::FeatureCrypto))
        emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
      else
        emitFPU(ARM::FK_NEON_FP_ARMV8);
    } else if (STI.hasFeature(ARM::FeatureVFP4))
      emitFPU(ARM::FK_NEON_VFPV4);
    else
      emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
                                               : ARM::FK_NEON);
    // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
    if (STI.hasFeature(ARM::HasV8Ops))
      emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
                    STI.hasFeature(ARM::HasV8_1aOps)
                        ? ARMBuildAttrs::AllowNeonARMv8_1a
                        : ARMBuildAttrs::AllowNeonARMv8);
  } else {
    if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP))
      // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
      // FPU, but there are two different names for it depending on the CPU.
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_FP_ARMV8
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16
                                                      : ARM::FK_FPV5_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP))
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_VFPV4
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16
                                                      : ARM::FK_FPV4_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP))
      emitFPU(
          STI.hasFeature(ARM::FeatureD32)
              // +d32
              ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
                                                  : ARM::FK_VFPV3)
              // -d32
              : (STI.hasFeature(ARM::FeatureFP64)
                     ? (STI.hasFeature(ARM::FeatureFP16)
                            ? ARM::FK_VFPV3_D16_FP16
                            : ARM::FK_VFPV3_D16)
                     : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
                                                         : ARM::FK_VFPV3XD)));
    else if (STI.hasFeature(ARM::FeatureVFP2_D16_SP))
      emitFPU(ARM::FK_VFPV2);
  }
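
  // For instance (derived from the selection above): a single-precision FPv5
  // core such as a Cortex-M7 with fpv5-sp-d16 (FeatureFPARMv8_D16_SP set,
  // FeatureD32 and FeatureFP64 clear) is reported as FK_FPV5_SP_D16.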

  // ABI_HardFP_use attribute to indicate single precision FP.
  if (STI.hasFeature(ARM::FeatureVFP2_D16_SP) &&
      !STI.hasFeature(ARM::FeatureFP64))
    emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
                  ARMBuildAttrs::HardFPSinglePrecision);

  if (STI.hasFeature(ARM::FeatureFP16))
    emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);

  if (STI.hasFeature(ARM::FeatureMP))
    emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);

  if (STI.hasFeature(ARM::HasMVEFloatOps))
    emitAttribute(ARMBuildAttrs::MVE_arch,
                  ARMBuildAttrs::AllowMVEIntegerAndFloat);
  else if (STI.hasFeature(ARM::HasMVEIntegerOps))
    emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEInteger);

  // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
  // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
  // It is not possible to produce DisallowDIV: if hwdiv is present in the base
  // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
  // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
  // otherwise, the default value (AllowDIVIfExists) applies.
  if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
    emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);
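
  // Example (derived from the check above): a Cortex-A15-class v7-A core
  // with ARM-mode hwdiv gets Tag_DIV_use = AllowDIVExt, whereas on a v8-A
  // core hwdiv is part of the base architecture and nothing is emitted.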

  if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
    emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureStrictAlign))
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Not_Allowed);
  else
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureTrustZone) &&
      STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowTZVirtualization);
  else if (STI.hasFeature(ARM::FeatureTrustZone))
    emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
  else if (STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowVirtualization);
}