[llvm] MC: Restructure MCAlignFragment as a fixed part and an alignment tail (PR #149030)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 16 00:21:24 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-loongarch
@llvm/pr-subscribers-backend-risc-v
Author: Fangrui Song (MaskRay)
Changes:
Follow-up to #148544
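For context, a minimal sketch (not part of the patch) of how alignment is expressed after this change, based on the MCSection.h and MCObjectStreamer.cpp hunks below: the dedicated MCAlignFragment class is removed, and alignment state lives in MCFragment's union as a tail behind the fragment's fixed content. The helper name here is hypothetical.

```cpp
// Illustrative sketch only, mirroring the hunks below; addAlignTail is a
// hypothetical helper, not an API added by this patch.
#include "llvm/MC/MCSection.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

static void addAlignTail(MCFragment &F, const MCSubtargetInfo *STI) {
  // Reuse the current data fragment: its fixed content stays in place and
  // the alignment padding becomes the fragment's tail.
  F.makeAlign(Align(16), /*Fill=*/0, /*FillLen=*/1, /*MaxBytesToEmit=*/16);
  // Pad with target nops (as MCObjectStreamer::emitCodeAlignment now does)
  // instead of the fill value.
  F.setAlignEmitNops(true, STI);
}
```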
---
Patch is 26.86 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149030.diff
12 Files Affected:
- (modified) llvm/include/llvm/MC/MCAsmBackend.h (+2-5)
- (modified) llvm/include/llvm/MC/MCSection.h (+50-46)
- (modified) llvm/lib/MC/MCAssembler.cpp (+66-70)
- (modified) llvm/lib/MC/MCExpr.cpp (+4-4)
- (modified) llvm/lib/MC/MCFragment.cpp (+8-9)
- (modified) llvm/lib/MC/MCObjectStreamer.cpp (+4-4)
- (modified) llvm/lib/MC/WasmObjectWriter.cpp (+10-12)
- (modified) llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp (+5-5)
- (modified) llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h (+2-3)
- (modified) llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp (+3-3)
- (modified) llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h (+2-3)
- (modified) llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp (+1-3)
``````````diff
diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index 0322cbe6cbe8d..93259b0ea6d74 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -18,9 +18,7 @@
namespace llvm {
-class MCAlignFragment;
class MCFragment;
-class MCLEBFragment;
class MCSymbol;
class MCAssembler;
class MCContext;
@@ -108,15 +106,14 @@ class LLVM_ABI MCAsmBackend {
/// Hook to check if extra nop bytes must be inserted for alignment directive.
/// For some targets this may be necessary in order to support linker
/// relaxation. The number of bytes to insert are returned in Size.
- virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+ virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
unsigned &Size) {
return false;
}
/// Hook which indicates if the target requires a fixup to be generated when
/// handling an align directive in an executable section
- virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
- MCAlignFragment &AF) {
+ virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) {
return false;
}
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 296fdd8af0d14..5d8fa5680c27d 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -254,6 +254,19 @@ class MCFragment {
uint32_t OperandStart;
uint32_t OperandSize;
} relax;
+ struct {
+ // The alignment to ensure, in bytes.
+ Align Alignment;
+ // The size of the integer (in bytes) of \p Value.
+ uint8_t FillLen;
+ // If true, fill with target-specific nop instructions.
+ bool EmitNops;
+ // The maximum number of bytes to emit; if the alignment
+ // cannot be satisfied in this width then this fragment is ignored.
+ unsigned MaxBytesToEmit;
+ // Value to use for filling padding bytes.
+ int64_t Fill;
+ } align;
struct {
// True if this is a sleb128, false if uleb128.
bool IsSigned;
@@ -441,6 +454,43 @@ class MCFragment {
llvm::copy(Inst, S.begin() + u.relax.OperandStart);
}
+ //== FT_Align functions
+ void makeAlign(Align Alignment, int64_t Fill, uint8_t FillLen,
+ unsigned MaxBytesToEmit) {
+ Kind = FT_Align;
+ u.align.EmitNops = false;
+ u.align.Alignment = Alignment;
+ u.align.Fill = Fill;
+ u.align.FillLen = FillLen;
+ u.align.MaxBytesToEmit = MaxBytesToEmit;
+ }
+
+ Align getAlignment() const {
+ assert(Kind == FT_Align);
+ return u.align.Alignment;
+ }
+ int64_t getAlignFill() const {
+ assert(Kind == FT_Align);
+ return u.align.Fill;
+ }
+ uint8_t getAlignFillLen() const {
+ assert(Kind == FT_Align);
+ return u.align.FillLen;
+ }
+ unsigned getAlignMaxBytesToEmit() const {
+ assert(Kind == FT_Align);
+ return u.align.MaxBytesToEmit;
+ }
+ bool hasAlignEmitNops() const {
+ assert(Kind == FT_Align);
+ return u.align.EmitNops;
+ }
+ void setAlignEmitNops(bool Value, const MCSubtargetInfo *STI) {
+ assert(Kind == FT_Align);
+ u.align.EmitNops = Value;
+ this->STI = STI;
+ }
+
//== FT_LEB functions
const MCExpr &getLEBValue() const {
assert(Kind == FT_LEB);
@@ -486,52 +536,6 @@ class MCEncodedFragment : public MCFragment {
: MCFragment(FType, HasInstructions) {}
};
-class MCAlignFragment : public MCFragment {
- /// Flag to indicate that (optimal) NOPs should be emitted instead
- /// of using the provided value. The exact interpretation of this flag is
- /// target dependent.
- bool EmitNops : 1;
-
- /// The alignment to ensure, in bytes.
- Align Alignment;
-
- /// The size of the integer (in bytes) of \p Value.
- uint8_t FillLen;
-
- /// The maximum number of bytes to emit; if the alignment
- /// cannot be satisfied in this width then this fragment is ignored.
- unsigned MaxBytesToEmit;
-
- /// Value to use for filling padding bytes.
- int64_t Fill;
-
- /// When emitting Nops some subtargets have specific nop encodings.
- const MCSubtargetInfo *STI = nullptr;
-
-public:
- MCAlignFragment(Align Alignment, int64_t Fill, uint8_t FillLen,
- unsigned MaxBytesToEmit)
- : MCFragment(FT_Align, false), EmitNops(false), Alignment(Alignment),
- FillLen(FillLen), MaxBytesToEmit(MaxBytesToEmit), Fill(Fill) {}
-
- Align getAlignment() const { return Alignment; }
- int64_t getFill() const { return Fill; }
- uint8_t getFillLen() const { return FillLen; }
- unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
-
- bool hasEmitNops() const { return EmitNops; }
- void setEmitNops(bool Value, const MCSubtargetInfo *STI) {
- EmitNops = Value;
- this->STI = STI;
- }
-
- const MCSubtargetInfo *getSubtargetInfo() const { return STI; }
-
- static bool classof(const MCFragment *F) {
- return F->getKind() == MCFragment::FT_Align;
- }
-};
-
class MCFillFragment : public MCFragment {
uint8_t ValueSize;
/// Value to use for filling bytes.
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index d4d10e0cd74a5..a084af79e9ec8 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -228,25 +228,24 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
return 4;
case MCFragment::FT_Align: {
- const MCAlignFragment &AF = cast<MCAlignFragment>(F);
- unsigned Offset = getFragmentOffset(AF);
- unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
+ unsigned Offset = F.Offset + F.getFixedSize();
+ unsigned Size = offsetToAlignment(Offset, F.getAlignment());
// Insert extra Nops for code alignment if the target define
// shouldInsertExtraNopBytesForCodeAlign target hook.
- if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
- getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
- return Size;
+ if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
+ getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
+ return F.getFixedSize() + Size;
// If we are padding with nops, force the padding to be larger than the
// minimum nop size.
- if (Size > 0 && AF.hasEmitNops()) {
+ if (Size > 0 && F.hasAlignEmitNops()) {
while (Size % getBackend().getMinimumNopSize())
- Size += AF.getAlignment().value();
+ Size += F.getAlignment().value();
}
- if (Size > AF.getMaxBytesToEmit())
- return 0;
- return Size;
+ if (Size > F.getAlignMaxBytesToEmit())
+ Size = 0;
+ return F.getFixedSize() + Size;
}
case MCFragment::FT_Org: {
@@ -416,6 +415,7 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
switch (F.getKind()) {
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
+ case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
@@ -429,48 +429,46 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
const auto &EF = cast<MCFragment>(F);
OS << StringRef(EF.getContents().data(), EF.getContents().size());
OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
- break;
- }
- case MCFragment::FT_Align: {
- ++stats::EmittedAlignFragments;
- const MCAlignFragment &AF = cast<MCAlignFragment>(F);
- assert(AF.getFillLen() && "Invalid virtual align in concrete fragment!");
-
- uint64_t Count = FragmentSize / AF.getFillLen();
- assert(FragmentSize % AF.getFillLen() == 0 &&
- "computeFragmentSize computed size is incorrect");
-
- // See if we are aligning with nops, and if so do that first to try to fill
- // the Count bytes. Then if that did not fill any bytes or there are any
- // bytes left to fill use the Value and ValueSize to fill the rest.
- // If we are aligning with nops, ask that target to emit the right data.
- if (AF.hasEmitNops()) {
- if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
- report_fatal_error("unable to write nop sequence of " +
- Twine(Count) + " bytes");
- break;
- }
-
- // Otherwise, write out in multiples of the value size.
- for (uint64_t i = 0; i != Count; ++i) {
- switch (AF.getFillLen()) {
- default: llvm_unreachable("Invalid size!");
- case 1:
- OS << char(AF.getFill());
- break;
- case 2:
- support::endian::write<uint16_t>(OS, AF.getFill(), Endian);
- break;
- case 4:
- support::endian::write<uint32_t>(OS, AF.getFill(), Endian);
- break;
- case 8:
- support::endian::write<uint64_t>(OS, AF.getFill(), Endian);
- break;
+ if (F.getKind() == MCFragment::FT_Align) {
+ ++stats::EmittedAlignFragments;
+ assert(F.getAlignFillLen() &&
+ "Invalid virtual align in concrete fragment!");
+
+ uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
+ assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
+ "computeFragmentSize computed size is incorrect");
+
+ // See if we are aligning with nops, and if so do that first to try to
+ // fill the Count bytes. Then if that did not fill any bytes or there are
+ // any bytes left to fill use the Value and ValueSize to fill the rest. If
+ // we are aligning with nops, ask that target to emit the right data.
+ if (F.hasAlignEmitNops()) {
+ if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
+ report_fatal_error("unable to write nop sequence of " + Twine(Count) +
+ " bytes");
+ } else {
+ // Otherwise, write out in multiples of the value size.
+ for (uint64_t i = 0; i != Count; ++i) {
+ switch (F.getAlignFillLen()) {
+ default:
+ llvm_unreachable("Invalid size!");
+ case 1:
+ OS << char(F.getAlignFill());
+ break;
+ case 2:
+ support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
+ break;
+ case 4:
+ support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
+ break;
+ case 8:
+ support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
+ break;
+ }
+ }
}
}
- break;
- }
+ } break;
case MCFragment::FT_Fill: {
++stats::EmittedFillFragments;
@@ -608,9 +606,7 @@ void MCAssembler::writeSectionData(raw_ostream &OS,
case MCFragment::FT_Align:
// Check that we aren't trying to write a non-zero value into a virtual
// section.
- assert((cast<MCAlignFragment>(F).getFillLen() == 0 ||
- cast<MCAlignFragment>(F).getFill() == 0) &&
- "Invalid align in virtual section!");
+ assert(F.getAlignFill() == 0 && "Invalid align in virtual section!");
break;
case MCFragment::FT_Fill:
assert((cast<MCFillFragment>(F).getValue() == 0) &&
@@ -699,17 +695,22 @@ void MCAssembler::layout() {
for (MCSection &Sec : *this) {
for (MCFragment &F : Sec) {
// Process fragments with fixups here.
- if (F.isEncoded()) {
- auto Contents = F.getContents();
- for (MCFixup &Fixup : F.getFixups()) {
- uint64_t FixedValue;
- MCValue Target;
- evaluateFixup(F, Fixup, Target, FixedValue,
- /*RecordReloc=*/true, Contents);
- }
- // In the variable part, fixup offsets are relative to the fixed part's
- // start. Extend the variable contents to the left to account for the
- // fixed part size.
+ auto Contents = F.getContents();
+ for (MCFixup &Fixup : F.getFixups()) {
+ uint64_t FixedValue;
+ MCValue Target;
+ evaluateFixup(F, Fixup, Target, FixedValue,
+ /*RecordReloc=*/true, Contents);
+ }
+ if (F.getKind() == MCFragment::FT_Align) {
+ // For RISC-V linker relaxation, an alignment relocation might be
+ // needed.
+ if (F.hasAlignEmitNops())
+ getBackend().shouldInsertFixupForCodeAlign(*this, F);
+ } else if (F.getVarFixups().size()) {
+ // In the variable part, fixup offsets are relative to the fixed
+ // part's start. Extend the variable contents to the left to account
+ // for the fixed part size.
Contents = MutableArrayRef(F.getParent()->ContentStorage)
.slice(F.VarContentStart - Contents.size(), F.getSize());
for (MCFixup &Fixup : F.getVarFixups()) {
@@ -718,11 +719,6 @@ void MCAssembler::layout() {
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
- } else if (auto *AF = dyn_cast<MCAlignFragment>(&F)) {
- // For RISC-V linker relaxation, an alignment relocation might be
- // needed.
- if (AF->hasEmitNops())
- getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
}
}
}
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 22dff497911de..f0f1bd485258f 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -379,11 +379,11 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
// After layout, during relocation generation, it can be treated as a
// data fragment.
Displacement += F->getSize();
- } else if (auto *AF = dyn_cast<MCAlignFragment>(F);
- AF && Layout && AF->hasEmitNops() &&
+ } else if (F->getKind() == MCFragment::FT_Align && Layout &&
+ F->hasAlignEmitNops() &&
!Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
- *AF, Count)) {
- Displacement += Asm->computeFragmentSize(*AF);
+ *F, Count)) {
+ Displacement += Asm->computeFragmentSize(*F);
} else if (auto *FF = dyn_cast<MCFillFragment>(F);
FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
Displacement += Num * FF->getValueSize();
diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp
index bfe045abe6e53..d01660f640c46 100644
--- a/llvm/lib/MC/MCFragment.cpp
+++ b/llvm/lib/MC/MCFragment.cpp
@@ -73,17 +73,9 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
};
switch (getKind()) {
- case MCFragment::FT_Align: {
- const auto *AF = cast<MCAlignFragment>(this);
- OS << " Align:" << AF->getAlignment().value() << " Fill:" << AF->getFill()
- << " FillLen:" << unsigned(AF->getFillLen())
- << " MaxBytesToEmit:" << AF->getMaxBytesToEmit();
- if (AF->hasEmitNops())
- OS << " Nops";
- break;
- }
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
+ case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame: {
@@ -112,6 +104,13 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
OS << ' ';
getInst().dump_pretty(OS);
break;
+ case MCFragment::FT_Align:
+ OS << "\n Align:" << getAlignment().value() << " Fill:" << getAlignFill()
+ << " FillLen:" << unsigned(getAlignFillLen())
+ << " MaxBytesToEmit:" << getAlignMaxBytesToEmit();
+ if (hasAlignEmitNops())
+ OS << " Nops";
+ break;
case MCFragment::FT_LEB: {
OS << " Value:";
getLEBValue().print(OS, nullptr);
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index c0cef0f06c57a..056b30b7df6f2 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -579,8 +579,8 @@ void MCObjectStreamer::emitValueToAlignment(Align Alignment, int64_t Fill,
unsigned MaxBytesToEmit) {
if (MaxBytesToEmit == 0)
MaxBytesToEmit = Alignment.value();
- insert(getContext().allocFragment<MCAlignFragment>(Alignment, Fill, FillLen,
- MaxBytesToEmit));
+ MCFragment *F = getOrCreateDataFragment();
+ F->makeAlign(Alignment, Fill, FillLen, MaxBytesToEmit);
// Update the maximum alignment on the current section if necessary.
MCSection *CurSec = getCurrentSectionOnly();
@@ -591,8 +591,8 @@ void MCObjectStreamer::emitCodeAlignment(Align Alignment,
const MCSubtargetInfo *STI,
unsigned MaxBytesToEmit) {
emitValueToAlignment(Alignment, 0, 1, MaxBytesToEmit);
- auto *F = cast<MCAlignFragment>(getCurrentFragment());
- F->setEmitNops(true, STI);
+ auto *F = getCurrentFragment();
+ F->setAlignEmitNops(true, STI);
// With RISC-V style linker relaxation, mark the section as linker-relaxable
// if the alignment is larger than the minimum NOP size.
unsigned Size;
diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp
index 7af240a73f952..c0fad137b9037 100644
--- a/llvm/lib/MC/WasmObjectWriter.cpp
+++ b/llvm/lib/MC/WasmObjectWriter.cpp
@@ -696,14 +696,15 @@ static void addData(SmallVectorImpl<char> &DataBytes,
if (Frag.hasInstructions())
report_fatal_error("only data supported in data sections");
- if (auto *Align = dyn_cast<MCAlignFragment>(&Frag)) {
- if (Align->getFillLen() != 1)
+ llvm::append_range(DataBytes, Frag.getContents());
+ if (Frag.getKind() == MCFragment::FT_Align) {
+ if (Frag.getAlignFillLen() != 1)
report_fatal_error("only byte values supported for alignment");
// If nops are requested, use zeros, as this is the data section.
- uint8_t Value = Align->hasEmitNops() ? 0 : Align->getFill();
+ uint8_t Value = Frag.hasAlignEmitNops() ? 0 : Frag.getAlignFill();
uint64_t Size =
- std::min<uint64_t>(alignTo(DataBytes.size(), Align->getAlignment()),
- DataBytes.size() + Align->getMaxBytesToEmit());
+ std::min<uint64_t>(alignTo(DataBytes.size(), Frag.getAlignment()),
+ DataBytes.size() + Frag.getAlignMaxBytesToEmit());
DataBytes.resize(Size, Value);
} else if (auto *Fill = dyn_cast<MCFillFragment>(&Frag)) {
int64_t NumValues;
@@ -711,12 +712,10 @@ static void addData(SmallVectorImpl<char> &DataBytes,
llvm_unreachable("The fill should be an assembler constant");
DataBytes.insert(DataBytes.end(), Fill->getValueSize() * NumValues,
Fill->getValue());
+ } else if (Frag.getKind() == MCFragment::FT_LEB) {
+ llvm::append_range(DataBytes, Frag.getVarContents());
} else {
- llvm::append_range(DataBytes, Frag.getContents());
- if (Frag.getKind() == MCFragment::FT_LEB)
- llvm::append_range(DataBytes, Frag.getVarContents());
- else
- assert(Frag.getKind() == MCFragment::FT_Data);
+ assert(Frag.getKind() == MCFragment::FT_Data);
}
}
@@ -1867,8 +1866,7 @@ uint64_t WasmObjectWriter::writeOneObject(MCAssembler &Asm,
const MCFragment &AlignFrag = *nextFrag;
if (AlignFrag.getKind() != MCFragment::FT_Align)
report_fatal_error(".init_array section should be aligned");
- if (cast<MCAlignFragment>(AlignFrag).getAlignment() !=
- Align(is64Bit() ? 8 : 4))
+ if (AlignFrag.getAlignment() != Align(is64Bit() ? 8 : 4))
report_fatal_error(
".init_array section should be aligned for pointers");
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index 7b9f1156f9102..9ed17a9b39516 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -182,14 +182,14 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
// could satisfy alignment by removing Nops.
// The function returns the total Nops Size we need to insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
- const MCAlignFragment &AF, unsigned &Size) {
+ const MCFragment &AF, unsigned &Size) {
// Calculate Nops Size only when linker relaxation enabled.
if (!AF.getSubtargetInfo(...
[truncated]
``````````
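As a worked illustration of the new FT_Align sizing in MCAssembler::computeFragmentSize above (the function below is hypothetical, and the minimum-nop-size and MaxBytesToEmit adjustments are omitted): the fragment's size is its fixed content plus the padding needed to align the end of that fixed part.

```cpp
#include "llvm/Support/Alignment.h"

// E.g. fragment offset 0 with 3 fixed bytes and .p2align 2:
// offsetToAlignment(3, Align(4)) == 1, so the fragment occupies 3 + 1 = 4 bytes.
static uint64_t exampleAlignFragmentSize(uint64_t FragOffset, uint64_t FixedSize,
                                         llvm::Align A) {
  uint64_t Padding = llvm::offsetToAlignment(FragOffset + FixedSize, A);
  return FixedSize + Padding;
}
```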
https://github.com/llvm/llvm-project/pull/149030