//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

using namespace llvm;

namespace llvm {
class MCSubtargetInfo;
}

#define DEBUG_TYPE "assembler"
namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedNopsFragments, "Number of emitted assembler fragments - nops");
STATISTIC(EmittedOrgFragments, "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace
// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
MCAssembler::MCAssembler(MCContext &Context,
                         std::unique_ptr<MCAsmBackend> Backend,
                         std::unique_ptr<MCCodeEmitter> Emitter,
                         std::unique_ptr<MCObjectWriter> Writer)
    : Context(Context), Backend(std::move(Backend)),
      Emitter(std::move(Emitter)), Writer(std::move(Writer)) {}
void MCAssembler::reset() {
  // reset objects owned by us
  if (getBackendPtr())
    getBackendPtr()->reset();
  if (getEmitterPtr())
    getEmitterPtr()->reset();
  if (getWriterPtr())
    getWriterPtr()->reset();
}
bool MCAssembler::registerSection(MCSection &Section) {
  if (Section.isRegistered())
    return false;
  assert(Section.curFragList()->Head && "allocInitialFragment not called");
  Sections.push_back(&Section);
  Section.setIsRegistered(true);
  return true;
}
bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const {
  if (ThumbFuncs.count(Symbol))
    return true;

  if (!Symbol->isVariable())
    return false;

  const MCExpr *Expr = Symbol->getVariableValue();

  MCValue V;
  if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr))
    return false;

  if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbolRefExpr *Ref = V.getSymA();
  if (!Ref)
    return false;

  if (Ref->getKind() != MCSymbolRefExpr::VK_None)
    return false;

  const MCSymbol &Sym = Ref->getSymbol();
  if (!isThumbFunc(&Sym))
    return false;

  ThumbFuncs.insert(Symbol); // Cache it.
  return true;
}
bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, const MCSubtargetInfo *STI,
                                uint64_t &Value, bool &WasForced) const {
  ++stats::evaluateFixup;

  // FIXME: This code has some duplication with recordRelocation. We should
  // probably merge the two into a single callback that tries to evaluate a
  // fixup and records a relocation if one is needed.

  // On error claim to have completely evaluated the fixup, to prevent any
  // further processing from being done.
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  WasForced = false;
  if (!Expr->evaluateAsRelocatable(Target, this, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }

  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  assert(getBackendPtr() && "Expected assembler backend");
  bool IsTarget = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                  MCFixupKindInfo::FKF_IsTarget;

  if (IsTarget)
    return getBackend().evaluateTargetFixup(*this, Fixup, DF, Target, STI,
                                            Value, WasForced);

  unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
  bool IsPCRel = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags &
                 MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved = false;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = (FixupFlags & MCFixupKindInfo::FKF_Constant) ||
                     getWriter().isSymbolRefDifferenceFullyResolvedImpl(
                         *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC)
      Offset &= ~0x3;
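    // For example (illustrative): a fixup at section offset 0x7 uses 0x4 as
    // its effective PC base once the low two bits are masked off here.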
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved &&
      getBackend().shouldForceRelocation(*this, Fixup, Target, STI)) {
    IsResolved = false;
    WasForced = true;
  }

  // A linker relaxation target may emit ADD/SUB relocations for A-B+C. Let
  // recordRelocation handle non-VK_None cases like A@plt-B+C.
  if (!IsResolved && Target.getSymA() && Target.getSymB() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
      getBackend().handleAddSubRelocations(*this, *DF, Fixup, Target, Value))
    return true;

  return IsResolved;
}
uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
  assert(getBackendPtr() && "Requires assembler backend");
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill: {
    auto &FF = cast<MCFillFragment>(F);
    int64_t NumValues = 0;
    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *this)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }
    int64_t Size = NumValues * FF.getValueSize();
    if (Size < 0) {
      getContext().reportError(FF.getLoc(), "invalid number of bytes");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Nops:
    return cast<MCNopsFragment>(F).getNumBytes();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_BoundaryAlign:
    return cast<MCBoundaryAlignFragment>(F).getSize();

  case MCFragment::FT_SymbolId:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = getFragmentOffset(AF);
    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());

    // Insert extra Nops for code alignment if the target defines the
    // shouldInsertExtraNopBytesForCodeAlign target hook.
    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
      return Size;

    // If we are padding with nops, force the padding to be a multiple of the
    // minimum nop size.
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment().value();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = getFragmentOffset(OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_PseudoProbe:
    return cast<MCPseudoProbeAddrFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}
// Compute the amount of padding required before the fragment \p F to
// obey bundling restrictions, where \p FOffset is the fragment's offset in
// its section and \p FSize is the fragment's size.
static uint64_t computeBundlePadding(unsigned BundleSize,
                                     const MCEncodedFragment *F,
                                     uint64_t FOffset, uint64_t FSize) {
  uint64_t OffsetInBundle = FOffset & (BundleSize - 1);
  uint64_t EndOfFragment = OffsetInBundle + FSize;

  // There are two kinds of bundling restrictions:
  //
  // 1) For alignToBundleEnd(), add padding to ensure that the fragment will
  //    *end* on a bundle boundary.
  // 2) Otherwise, check if the fragment would cross a bundle boundary. If it
  //    would, add padding until the end of the bundle so that the fragment
  //    will start in a new one.
  if (F->alignToBundleEnd()) {
    // Three possibilities here:
    //
    // A) The fragment just happens to end at a bundle boundary, so we're good.
    // B) The fragment ends before the current bundle boundary: pad it just
    //    enough to reach the boundary.
    // C) The fragment ends after the current bundle boundary: pad it until it
    //    reaches the end of the next bundle boundary.
    //
    // Note: this code could be made shorter with some modulo trickery, but it's
    // intentionally kept in its more explicit form for simplicity.
    if (EndOfFragment == BundleSize)
      return 0;
    else if (EndOfFragment < BundleSize)
      return BundleSize - EndOfFragment;
    else { // EndOfFragment > BundleSize
      return 2 * BundleSize - EndOfFragment;
    }
  } else if (OffsetInBundle > 0 && EndOfFragment > BundleSize)
    return BundleSize - OffsetInBundle;
  else
    return 0;
}
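
// Worked example (illustrative): with BundleSize = 16, a 6-byte fragment at
// FOffset = 13 has OffsetInBundle = 13 and EndOfFragment = 19. It would cross
// the bundle boundary, so 3 bytes of padding are requested and the fragment
// starts at offset 16, the beginning of the next bundle.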
void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
  assert(isa<MCEncodedFragment>(F) &&
         "Only MCEncodedFragment implementations have instructions");
  MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
  uint64_t FSize = computeFragmentSize(*EF);

  if (FSize > getBundleAlignSize())
    report_fatal_error("Fragment can't be larger than a bundle size");

  uint64_t RequiredBundlePadding =
      computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
  if (RequiredBundlePadding > UINT8_MAX)
    report_fatal_error("Padding cannot exceed 255 bytes");
  EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
  EF->Offset += RequiredBundlePadding;
  if (auto *DF = dyn_cast_or_null<MCDataFragment>(Prev))
    if (DF->getContents().empty())
      DF->Offset = EF->Offset;
}
void MCAssembler::ensureValid(MCSection &Sec) const {
  if (Sec.hasLayout())
    return;
  Sec.setHasLayout(true);
  MCFragment *Prev = nullptr;
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (isBundlingEnabled() && F.hasInstructions()) {
      layoutBundle(Prev, &F);
      Offset = F.Offset;
    }
    Offset += computeFragmentSize(F);
    Prev = &F;
  }
}
uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
  ensureValid(*F.getParent());
  return F.Offset;
}
// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                           bool ReportError, uint64_t &Val) {
  if (!S.getFragment()) {
    if (ReportError)
      report_fatal_error("unable to evaluate offset to undefined symbol '" +
                         S.getName() + "'");
    return false;
  }
  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
  return true;
}
static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                bool ReportError, uint64_t &Val) {
  if (!S.isVariable())
    return getLabelOffset(Asm, S, ReportError, Val);

  // If S is a variable, evaluate it.
  MCValue Target;
  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
    report_fatal_error("unable to evaluate offset for variable '" +
                       S.getName() + "'");

  uint64_t Offset = Target.getConstant();

  const MCSymbolRefExpr *A = Target.getSymA();
  if (A) {
    uint64_t ValA;
    // FIXME: On most platforms, `Target`'s component symbols are labels from
    // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
      return false;
    Offset += ValA;
  }

  const MCSymbolRefExpr *B = Target.getSymB();
  if (B) {
    uint64_t ValB;
    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
      return false;
    Offset -= ValB;
  }

  Val = Offset;
  return true;
}
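
// For example, given "X = A - B + 4", Offset starts at the constant 4, then
// the offset of A is added and the offset of B subtracted, yielding X's
// assembly-time offset.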
bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
  return getSymbolOffsetImpl(*this, S, false, Val);
}

uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
  uint64_t Val;
  getSymbolOffsetImpl(*this, S, true, Val);
  return Val;
}
const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
  if (!Symbol.isVariable())
    return &Symbol;

  const MCExpr *Expr = Symbol.getVariableValue();
  MCValue Value;
  if (!Expr->evaluateAsValue(Value, *this)) {
    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
    return nullptr;
  }

  const MCSymbolRefExpr *RefB = Value.getSymB();
  if (RefB) {
    getContext().reportError(
        Expr->getLoc(),
        Twine("symbol '") + RefB->getSymbol().getName() +
            "' could not be evaluated in a subtraction expression");
    return nullptr;
  }

  const MCSymbolRefExpr *A = Value.getSymA();
  if (!A)
    return nullptr;

  const MCSymbol &ASym = A->getSymbol();
  if (ASym.isCommon()) {
    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
    return nullptr;
  }

  return &ASym;
}
uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
  // The size is the last fragment's end offset.
  const MCFragment &F = *Sec.curFragList()->Tail;
  return getFragmentOffset(F) + computeFragmentSize(F);
}

uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
  // Virtual sections have no file size.
  if (Sec.isVirtualSection())
    return 0;
  return getSectionAddressSize(Sec);
}
bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
  bool Changed = !Symbol.isRegistered();
  if (Changed) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
  return Changed;
}
void MCAssembler::writeFragmentPadding(raw_ostream &OS,
                                       const MCEncodedFragment &EF,
                                       uint64_t FSize) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = EF.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(EF.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    const MCSubtargetInfo *STI = EF.getSubtargetInfo();
    if (EF.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //             v--------------v   <- BundleAlignSize
      //        v---------v             <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^   <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(OS, DistanceToBoundary, STI))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(OS, BundlePadding, STI))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
/// Write the fragment \p F to the output file.
static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                          const MCFragment &F) {
  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(F);

  llvm::endianness Endian = Asm.getBackend().Endian;

  if (const MCEncodedFragment *EF = dyn_cast<MCEncodedFragment>(&F))
    Asm.writeFragmentPadding(OS, *EF, FragmentSize);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OS.tell();
  (void)Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to
    // fill the Count bytes. Then if that did not fill any bytes or there are
    // any bytes left to fill use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask that target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OS << char(AF.getValue()); break;
      case 2:
        support::endian::write<uint16_t>(OS, AF.getValue(), Endian);
        break;
      case 4:
        support::endian::write<uint32_t>(OS, AF.getValue(), Endian);
        break;
      case 8:
        support::endian::write<uint64_t>(OS, AF.getValue(), Endian);
        break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OS << cast<MCDataFragment>(F).getContents();
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OS << cast<MCRelaxableFragment>(F).getContents();
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OS << cast<MCCompactEncodedInstFragment>(F).getContents();
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint64_t V = FF.getValue();
    unsigned VSize = FF.getValueSize();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    assert(0 < VSize && VSize <= MaxChunkSize && "Illegal fragment fill size");
    // Duplicate V into Data as byte vector to reduce number of
    // writes done. As such, do endian conversion here.
    for (unsigned I = 0; I != VSize; ++I) {
      unsigned index = Endian == llvm::endianness::little ? I : (VSize - I - 1);
      Data[I] = uint8_t(V >> (index * 8));
    }
    for (unsigned I = VSize; I < MaxChunkSize; ++I)
      Data[I] = Data[I - VSize];
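    // For example (illustrative): V = 0x0102 with VSize = 2 on a
    // little-endian target yields Data = {0x02, 0x01, 0x02, 0x01, ...},
    // i.e. the value replicated across the whole chunk buffer.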
    // Set ChunkSize to the largest multiple of VSize that fits in Data.
    const unsigned NumPerChunk = MaxChunkSize / VSize;
    const unsigned ChunkSize = VSize * NumPerChunk;

    // Do copies by chunk.
    StringRef Ref(Data, ChunkSize);
    for (uint64_t I = 0, E = FragmentSize / ChunkSize; I != E; ++I)
      OS << Ref;

    // Do the remainder if needed.
    unsigned TrailingCount = FragmentSize % ChunkSize;
    if (TrailingCount)
      OS.write(Data, TrailingCount);
    break;
  }
  case MCFragment::FT_Nops: {
    ++stats::EmittedNopsFragments;
    const MCNopsFragment &NF = cast<MCNopsFragment>(F);

    int64_t NumBytes = NF.getNumBytes();
    int64_t ControlledNopLength = NF.getControlledNopLength();
    int64_t MaximumNopLength =
        Asm.getBackend().getMaximumNopSize(*NF.getSubtargetInfo());

    assert(NumBytes > 0 && "Expected positive NOPs fragment size");
    assert(ControlledNopLength >= 0 && "Expected non-negative NOP size");

    if (ControlledNopLength > MaximumNopLength) {
      Asm.getContext().reportError(NF.getLoc(),
                                   "illegal NOP size " +
                                       std::to_string(ControlledNopLength) +
                                       ". (expected within [0, " +
                                       std::to_string(MaximumNopLength) + "])");
      // Clamp the NOP length as reportError does not stop the execution.
      ControlledNopLength = MaximumNopLength;
    }

    // Use the maximum value if the size of each NOP is not specified.
    if (!ControlledNopLength)
      ControlledNopLength = MaximumNopLength;

    while (NumBytes) {
      uint64_t NumBytesToEmit =
          (uint64_t)std::min(NumBytes, ControlledNopLength);
      assert(NumBytesToEmit && "try to emit empty NOP instruction");
      if (!Asm.getBackend().writeNopData(OS, NumBytesToEmit,
                                         NF.getSubtargetInfo())) {
        report_fatal_error("unable to write nop sequence of the remaining " +
                           Twine(NumBytesToEmit) + " bytes");
        break;
      }
      NumBytes -= NumBytesToEmit;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OS << LF.getContents();
    break;
  }

  case MCFragment::FT_BoundaryAlign: {
    const MCBoundaryAlignFragment &BF = cast<MCBoundaryAlignFragment>(F);
    if (!Asm.getBackend().writeNopData(OS, FragmentSize, BF.getSubtargetInfo()))
      report_fatal_error("unable to write nop sequence of " +
                         Twine(FragmentSize) + " bytes");
    break;
  }

  case MCFragment::FT_SymbolId: {
    const MCSymbolIdFragment &SF = cast<MCSymbolIdFragment>(F);
    support::endian::write<uint32_t>(OS, SF.getSymbol()->getIndex(), Endian);
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OS << char(OF.getValue());

    break;
  }

  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OS << CF.getContents();
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OS << OF.getContents();
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OS << DRF.getContents();
    break;
  }
  case MCFragment::FT_PseudoProbe: {
    const MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(F);
    OS << PF.getContents();
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OS.tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}
void MCAssembler::writeSectionData(raw_ostream &OS,
                                   const MCSection *Sec) const {
  assert(getBackendPtr() && "Expected assembler backend");

  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(getSectionFileSize(*Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          getContext().reportError(SMLoc(), Sec->getVirtualSectionKind() +
                                                " section '" + Sec->getName() +
                                                "' cannot have fixups");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            getContext().reportError(SMLoc(),
                                     Sec->getVirtualSectionKind() +
                                         " section '" + Sec->getName() +
                                         "' cannot have non-zero initializers");
            break;
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      case MCFragment::FT_Org:
        break;
      }
    }

    return;
  }

  uint64_t Start = OS.tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(OS, *this, F);

  assert(getContext().hadError() ||
         OS.tell() - Start == getSectionAddressSize(*Sec));
}
std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
                         const MCSubtargetInfo *STI) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool WasForced;
  bool IsResolved =
      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}
void MCAssembler::layout() {
  assert(getBackendPtr() && "Expected assembler backend");
  DEBUG_WITH_TYPE("mc-dump", {
    errs() << "assembler backend - pre-layout\n--\n";
    dump();
  });

  // Assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    Sec.setOrdinal(SectionIndex++);

    // Chain together fragments from all subsections.
    if (Sec.Subsections.size() > 1) {
      MCDummyFragment Dummy;
      MCFragment *Tail = &Dummy;
      for (auto &[_, List] : Sec.Subsections) {
        Tail->Next = List.Head;
        Tail = List.Tail;
      }
      Sec.Subsections.clear();
      Sec.Subsections.push_back({0u, {Dummy.getNext(), Tail}});
      Sec.CurFragList = &Sec.Subsections[0].second;
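      // At this point fragments from, e.g., ".subsection 1" simply follow
      // those of lower-numbered subsections in one flat chain, which is what
      // the layout passes below expect.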
    }

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  this->HasLayout = true;
  while (layoutOnce()) {
    if (getContext().hadError())
      return;
    // Size of fragments in one section can depend on the size of fragments in
    // another. If any fragment has changed size, we have to re-layout (and
    // as a result possibly further relax) all.
    for (MCSection &Sec : *this)
      Sec.setHasLayout(false);
  }

  DEBUG_WITH_TYPE("mc-dump", {
    errs() << "assembler backend - post-relaxation\n--\n";
    dump();
  });

  // Finalize the layout, including fragment lowering.
  getBackend().finishLayout(*this);

  DEBUG_WITH_TYPE("mc-dump", {
    errs() << "assembler backend - final-layout\n--\n";
    dump();
  });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      const MCSubtargetInfo *STI = nullptr;

      // Process MCAlignFragment and MCEncodedFragmentWithFixups here.
      switch (Frag.getKind()) {
      default:
        continue;
      case MCFragment::FT_Align: {
        MCAlignFragment &AF = cast<MCAlignFragment>(Frag);
        // Insert fixup type for code alignment if the target defines the
        // shouldInsertFixupForCodeAlign target hook.
        if (Sec.useCodeAlign() && AF.hasEmitNops())
          getBackend().shouldInsertFixupForCodeAlign(*this, AF);
        continue;
      }
      case MCFragment::FT_Data: {
        MCDataFragment &DF = cast<MCDataFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        STI = DF.getSubtargetInfo();
        assert(!DF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_Relaxable: {
        MCRelaxableFragment &RF = cast<MCRelaxableFragment>(Frag);
        Fixups = RF.getFixups();
        Contents = RF.getContents();
        STI = RF.getSubtargetInfo();
        assert(!RF.hasInstructions() || STI != nullptr);
        break;
      }
      case MCFragment::FT_CVDefRange: {
        MCCVDefRangeFragment &CF = cast<MCCVDefRangeFragment>(Frag);
        Fixups = CF.getFixups();
        Contents = CF.getContents();
        break;
      }
      case MCFragment::FT_Dwarf: {
        MCDwarfLineAddrFragment &DF = cast<MCDwarfLineAddrFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_DwarfFrame: {
        MCDwarfCallFrameFragment &DF = cast<MCDwarfCallFrameFragment>(Frag);
        Fixups = DF.getFixups();
        Contents = DF.getContents();
        break;
      }
      case MCFragment::FT_LEB: {
        auto &LF = cast<MCLEBFragment>(Frag);
        Fixups = LF.getFixups();
        Contents = LF.getContents();
        break;
      }
      case MCFragment::FT_PseudoProbe: {
        MCPseudoProbeAddrFragment &PF = cast<MCPseudoProbeAddrFragment>(Frag);
        Fixups = PF.getFixups();
        Contents = PF.getContents();
        break;
      }
      }
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Frag, Fixup, STI);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved, STI);
      }
    }
  }
}
void MCAssembler::Finish() {
  layout();

  // Write the object file.
  stats::ObjectBytes += getWriter().writeObject(*this);

  HasLayout = false;
}
bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF) const {
  assert(getBackendPtr() && "Expected assembler backend");
  MCValue Target;
  uint64_t Value;
  bool WasForced;
  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(*this, Fixup, Resolved,
                                                   Value, DF, WasForced);
}
bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
  assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst(), *F->getSubtargetInfo()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F))
      return true;

  return false;
}
bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
  assert(getEmitterPtr() &&
         "Expected CodeEmitter defined for relaxInstruction");
  if (!fragmentNeedsRelaxation(&F))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.
  MCInst Relaxed = F.getInst();
  getBackend().relaxInstruction(Relaxed, *F.getSubtargetInfo());

  // Encode the new instruction.
  F.setInst(Relaxed);
  F.getFixups().clear();
  F.getContents().clear();
  getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
                                 *F.getSubtargetInfo());
  return true;
}
bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
  const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
  unsigned PadTo = OldSize;
  int64_t Value;
  SmallVectorImpl<char> &Data = LF.getContents();
  LF.getFixups().clear();
  // Use evaluateKnownAbsolute for Mach-O as a hack: .subsections_via_symbols
  // requires that .uleb128 A-B is foldable where A and B reside in different
  // fragments. This is used by __gcc_except_table.
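  // (For example, exception-table call-site entries emit lengths such as
  // ".uleb128 Lend-Lstart" between labels that may land in separate
  // fragments.)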
  bool Abs = getWriter().getSubsectionsViaSymbols()
                 ? LF.getValue().evaluateKnownAbsolute(Value, *this)
                 : LF.getValue().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    bool Relaxed, UseZeroPad;
    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(*this, LF, Value);
    if (!Relaxed) {
      getContext().reportError(LF.getValue().getLoc(),
                               Twine(LF.isSigned() ? ".s" : ".u") +
                                   "leb128 expression is not absolute");
      LF.setValue(MCConstantExpr::create(0, Context));
    }
    uint8_t Tmp[10]; // maximum size: ceil(64/7)
    PadTo = std::max(PadTo, encodeULEB128(uint64_t(Value), Tmp));
    if (UseZeroPad)
      Value = 0;
  }
  Data.clear();
  raw_svector_ostream OSE(Data);
  // The compiler can generate EH table assembly that is impossible to assemble
  // without either adding padding to an LEB fragment or adding extra padding
  // to a later alignment fragment. To accommodate such tables, relaxation can
  // only increase an LEB fragment size here, not decrease it. See PR35809.
  if (LF.isSigned())
    encodeSLEB128(Value, OSE, PadTo);
  else
    encodeULEB128(Value, OSE, PadTo);
  return OldSize != LF.getContents().size();
}
/// Check if the branch crosses the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch crosses the boundary.
static bool mayCrossBoundary(uint64_t StartAddr, uint64_t Size,
                             Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (StartAddr >> Log2(BoundaryAlignment)) !=
         ((EndAddr - 1) >> Log2(BoundaryAlignment));
}

/// Check if the branch is against the boundary.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch is against the boundary.
static bool isAgainstBoundary(uint64_t StartAddr, uint64_t Size,
                              Align BoundaryAlignment) {
  uint64_t EndAddr = StartAddr + Size;
  return (EndAddr & (BoundaryAlignment.value() - 1)) == 0;
}

/// Check if the branch needs padding.
///
/// \param StartAddr start address of the fused/unfused branch.
/// \param Size size of the fused/unfused branch.
/// \param BoundaryAlignment alignment requirement of the branch.
/// \returns true if the branch needs padding.
static bool needPadding(uint64_t StartAddr, uint64_t Size,
                        Align BoundaryAlignment) {
  return mayCrossBoundary(StartAddr, Size, BoundaryAlignment) ||
         isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
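
// Worked example (illustrative): with BoundaryAlignment = 32, a 6-byte branch
// starting at 0x1E crosses the boundary (0x1E >> 5 == 0 but 0x23 >> 5 == 1),
// and a 6-byte branch starting at 0x1A ends exactly at 0x20 and so is against
// it; both cases request padding.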
bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // A BoundaryAlignFragment that doesn't need to align any fragment should
  // not be relaxed.
  if (!BF.getLastFragment())
    return false;

  uint64_t AlignedOffset = getFragmentOffset(BF);
  uint64_t AlignedSize = 0;
  for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
    AlignedSize += computeFragmentSize(*F);
    if (F == BF.getLastFragment())
      break;
  }

  Align BoundaryAlignment = BF.getAlignment();
  uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
                         ? offsetToAlignment(AlignedOffset, BoundaryAlignment)
                         : 0U;
  if (NewSize == BF.getSize())
    return false;
  BF.setSize(NewSize);
  return true;
}
bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a line delta with an invalid expression");
  (void)Abs;
  int64_t LineDelta = DF.getLineDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, Data);
  return OldSize != Data.size();
}
bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
  bool WasRelaxed;
  if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
    return WasRelaxed;

  MCContext &Context = getContext();
  int64_t Value;
  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *this);
  if (!Abs) {
    getContext().reportError(DF.getAddrDelta().getLoc(),
                             "invalid CFI advance_loc expression");
    DF.setAddrDelta(MCConstantExpr::create(0, Context));
    return false;
  }

  SmallVectorImpl<char> &Data = DF.getContents();
  uint64_t OldSize = Data.size();
  Data.clear();
  DF.getFixups().clear();

  MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
  return OldSize != Data.size();
}
bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(*this, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(*this, F);
  return OldSize != F.getContents().size();
}
bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
  uint64_t OldSize = PF.getContents().size();
  int64_t AddrDelta;
  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
  assert(Abs && "We created a pseudo probe with an invalid expression");
  (void)Abs;
  SmallVectorImpl<char> &Data = PF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  PF.getFixups().clear();

  // AddrDelta is a signed integer.
  encodeSLEB128(AddrDelta, OSE, OldSize);
  return OldSize != Data.size();
}
bool MCAssembler::relaxFragment(MCFragment &F) {
  switch (F.getKind()) {
  default:
    return false;
  case MCFragment::FT_Relaxable:
    assert(!getRelaxAll() &&
           "Did not expect a MCRelaxableFragment in RelaxAll mode");
    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
  }
}
bool MCAssembler::layoutOnce() {
  ++stats::RelaxationSteps;

  bool Changed = false;
  for (MCSection &Sec : *this)
    for (MCFragment &Frag : Sec)
      if (relaxFragment(Frag))
        Changed = true;
  return Changed;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MCAssembler::dump() const {
  raw_ostream &OS = errs();

  OS << "<MCAssembler\n";
  OS << "  Sections:[\n    ";
  for (const MCSection &Sec : *this) {
    Sec.dump();
  }
  OS << "],\n";
  OS << "  Symbols:[";
  for (const MCSymbol &Sym : symbols()) {
    OS << "(";
    Sym.dump();
    OS << ", Index:" << Sym.getIndex() << ", ";