//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9 #include "llvm/MC/MCAssembler.h"
10 #include "llvm/ADT/ArrayRef.h"
11 #include "llvm/ADT/SmallString.h"
12 #include "llvm/ADT/SmallVector.h"
13 #include "llvm/ADT/Statistic.h"
14 #include "llvm/ADT/StringRef.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/MC/MCAsmBackend.h"
17 #include "llvm/MC/MCAsmInfo.h"
18 #include "llvm/MC/MCCodeEmitter.h"
19 #include "llvm/MC/MCCodeView.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCDwarf.h"
22 #include "llvm/MC/MCExpr.h"
23 #include "llvm/MC/MCFixup.h"
24 #include "llvm/MC/MCFixupKindInfo.h"
25 #include "llvm/MC/MCFragment.h"
26 #include "llvm/MC/MCInst.h"
27 #include "llvm/MC/MCObjectWriter.h"
28 #include "llvm/MC/MCSection.h"
29 #include "llvm/MC/MCSymbol.h"
30 #include "llvm/MC/MCValue.h"
31 #include "llvm/Support/Alignment.h"
32 #include "llvm/Support/Casting.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/EndianStream.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/LEB128.h"
37 #include "llvm/Support/raw_ostream.h"
46 class MCSubtargetInfo
;
#define DEBUG_TYPE "assembler"

namespace {
namespace stats {
STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace
// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {}
void MCAssembler::reset() {
  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
}
bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}
bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}
bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, const MCSubtargetInfo *STI,
                                uint64_t &Value, bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, this, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  unsigned FixupFlags = getBackend().getFixupKindInfo(Fixup.getKind()).Flags;
  if (FixupFlags & MCFixupKindInfo::FKF_IsTarget)
    return getBackend().evaluateTargetFixup(*this, Fixup, DF, Target, STI,
                                            Value, WasForced);

  bool IsPCRel = FixupFlags & MCFixupKindInfo::FKF_IsPCRel;
  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     getWriter().isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= getSymbolOffset(Sym);
  }

  bool ShouldAlignPC =
      FixupFlags & MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }
  // Let the backend force a relocation if needed.
  if (IsResolved &&
      getBackend().shouldForceRelocation(*this, Fixup, Target, STI)) {
    IsResolved = false;
    WasForced = true;
  }

  // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let
  // recordRelocation handle non-VK_None cases like A@plt-B+C.
  if (!IsResolved && Target.getSymA() && Target.getSymB() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
      getBackend().handleAddSubRelocations(*this, *DF, Fixup, Target, Value))
    return true;

  return IsResolved;
}
uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
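    // For example, a fragment starting at offset 5 with 8-byte alignment
    // needs offsetToAlignment(5, Align(8)) = 3 bytes of padding.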
    // Insert extra Nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;
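  // For example, with BundleSize = 16, a fragment at FOffset = 12 with
  // FSize = 8 has OffsetInBundle = 12 and EndOfFragment = 20: it would cross
  // the bundle boundary, so (unless it is aligned to the bundle end) it gets
  // 16 - 12 = 4 bytes of padding and starts in the next bundle.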
  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but
    // it's intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}
void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}
void MCAssembler::ensureValid(MCSection &Sec) const {
  if (Sec.hasLayout())
    return;
  Sec.setHasLayout(true);
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (isBundlingEnabled() && F.hasInstructions()) {
      layoutBundle(Prev, &F);
      Offset = F.Offset;
    }
    Offset += computeFragmentSize(F);
    Prev = &F;
  }
}
uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
  ensureValid(*F.getParent());
  return F.Offset;
}
// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}
static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If SD is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();
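  // For a variable such as `B = A + 4`, Target evaluates to (A, <no SymB>, 4),
  // so the result is the offset of A plus 4.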
  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}
bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}
const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB) {
    getContext().reportError(
        Expr->getLoc(),
        Twine("symbol '") + RefB->getSymbol().getName() +
            "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  if (ASym.isCommon()) {
    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}
uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}
uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}
bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}
void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];

    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;
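    // For example, with VSize = 2 and V = 0xABCD on a little-endian target,
    // Data holds the pattern CD AB repeated eight times and ChunkSize is 16,
    // so a 36-byte fill is written as two 16-byte chunks plus a 4-byte
    // remainder.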
    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }

  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) +
                                       "])");
      // Clamp the NOP length as reportError does not stop the execution
      // immediately.
      ControlledNopLength = MaximumNopLength;
    }

    // Use maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;
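    // Emit as many NOPs as needed, each at most ControlledNopLength bytes
    // long: e.g. 7 bytes with a 4-byte limit become a 4-byte NOP followed by
    // a 3-byte NOP.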
    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize,
                                       BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write a non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}
std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
                         const MCSubtargetInfo *STI) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}
void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDummyFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        assert(List.Head);
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;
    }

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }
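  // For example, a section with subsections 0:[a, b] and 1:[c] (kept sorted
  // by subsection number) now has the single fragment list [a, b, c].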
  // Layout until everything fits.
  this->HasLayout = true;
  while (layoutOnce()) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Sec.setHasLayout(false);
  }

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  getBackend().finishLayout(*this);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert fixup type for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_LEB: {
        auto &LF = cast<MCLEBFragment>(Frag);
        Fixups = LF.getFixups();
        Contents = LF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Frag, Fixup, STI);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}
void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this);

  HasLayout = false;
}
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(*this, Fixup, Resolved,
                                                   Value, DF, WasForced);
}
bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F))
      return true;

  return false;
}
bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.
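  // On x86, for example, this typically rewrites a short `jmp rel8` into the
  // longer `jmp rel32` form when the target may be out of 8-bit range.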
  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  F.getFixups().clear();
  F.getContents().clear();
  getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
                                 *F.getSubtargetInfo());
  return true;
}
bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  SmallVectorImpl<char> &Data = LF.getContents();
  LF.getFixups().clear();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
  bool Abs = getWriter().getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(*this, LF, Value);
    if (!Relaxed) {
      getContext().reportError(LF.getValue().getLoc(),
                               Twine(LF.isSigned() ? ".s" : ".u") +
                                   "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  Data.clear();
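  // Note that the padded encodings below use redundant LEB128 continuation
  // bytes: for example, the value 1 padded to two bytes is emitted as
  // 0x81 0x00, which decodes to the same value but keeps the size stable.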
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, PadTo);
  else
    encodeULEB128(Value, OSE, PadTo);
  return OldSize != LF.getContents().size();
}
/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}
/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}
/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
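// For example, with a 32-byte boundary: a 4-byte branch at address 30 crosses
// it (bytes 30..33 straddle address 32), and a 4-byte branch at address 28
// ends exactly against it; both cases require padding.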
bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}
bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta;
  LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  return OldSize != Data.size();
}
bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    getContext().reportError(DF.getAddrDelta().getLoc(),
                             "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  SmallVectorImpl<char> &Data = DF.getContents();
  uint64_t OldSize = Data.size();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  return OldSize != Data.size();
}
bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}
bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}
bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}
bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}
bool MCAssembler::layoutOnce() {
  ++stats::RelaxationSteps;

  bool Changed = false;
  for (MCSection &Sec : *this)
    for (MCFragment &Frag : Sec)
      if (relaxFragment(Frag))
        Changed = true;
  return Changed;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  bool First = true;
  for (const MCSection &Sec : *this) {
    if (First)
      First = false;
    else
      OS << ",\n    ";
    Sec.dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";

  First = true;
  for (const MCSymbol &Sym : symbols()) {
    if (First)
      First = false;
    else
      OS << ",\n           ";
    OS << "(";
    Sym.dump();
    OS << ", Index:" << Sym.getIndex() << ", ";