[llvm] 4eb5450 - Revert "[MC] Compute fragment offsets eagerly"
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 30 14:52:33 PDT 2024
Author: Fangrui Song
Date: 2024-07-30T14:52:29-07:00
New Revision: 4eb5450f630849ee0518487de38d857fbe5b1aee
URL: https://github.com/llvm/llvm-project/commit/4eb5450f630849ee0518487de38d857fbe5b1aee
DIFF: https://github.com/llvm/llvm-project/commit/4eb5450f630849ee0518487de38d857fbe5b1aee.diff
LOG: Revert "[MC] Compute fragment offsets eagerly"
This reverts commit 1a47f3f3db66589c11f8ddacfeaecc03fb80c510.
Fix #100283
This commit actually just triggered other preexisting problems:
* Size changes of fill fragments do not influence the fixed-point iteration.
* The `invalid number of bytes` error is reported too early, since
  `.zero A-B` may have a temporarily negative value in the first few
  iterations (see the sketch below).
However, these problems appeared to be at least "benign" (they did not affect
Linux kernel builds) before this commit.
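For context, the second bullet concerns fill directives whose size is a label
difference, which only settles once relaxation reaches its fixed point. A
hypothetical reduced sketch (an illustration with assumed label names, not the
actual reproducer from #100283):

    # The size of the .zero below equals the size of the jmp fragment:
    # 2 bytes before relaxation, 5 bytes once the jmp is widened to rel32.
            .text
    B:
            jmp     far_target      # relaxable; must widen, far_target is >127 bytes away
    A:
            .zero   A-B             # 2 in early iterations, 5 after the jmp relaxes
            .space  200             # keep far_target out of short-jump range
    far_target:

With more involved forward references the same kind of expression can evaluate
to a transiently negative value in the first iterations, so diagnosing
`invalid number of bytes` on an early iteration is premature; and because a
change in the fill fragment's own size does not mark the layout as changed,
fragments that depend on it may not be revisited (the first bullet).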
Added:
Modified:
llvm/include/llvm/MC/MCAsmBackend.h
llvm/include/llvm/MC/MCAssembler.h
llvm/include/llvm/MC/MCSection.h
llvm/lib/MC/MCAssembler.cpp
llvm/lib/MC/MCSection.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index d1d1814dd8b52..3f88ac02cd92a 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -217,9 +217,8 @@ class MCAsmBackend {
virtual bool writeNopData(raw_ostream &OS, uint64_t Count,
const MCSubtargetInfo *STI) const = 0;
- // Return true if fragment offsets have been adjusted and an extra layout
- // iteration is needed.
- virtual bool finishLayout(const MCAssembler &Asm) const { return false; }
+ /// Give backend an opportunity to finish layout after relaxation
+ virtual void finishLayout(MCAssembler const &Asm) const {}
/// Handle any target-specific assembler flags. By default, do nothing.
virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index d9752912ee66a..c6fa48128d189 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -111,7 +111,6 @@ class MCAssembler {
/// Check whether the given fragment needs relaxation.
bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF) const;
- void layoutSection(MCSection &Sec);
/// Perform one layout iteration and return true if any offsets
/// were adjusted.
bool layoutOnce();
@@ -148,9 +147,10 @@ class MCAssembler {
uint64_t computeFragmentSize(const MCFragment &F) const;
void layoutBundle(MCFragment *Prev, MCFragment *F) const;
+ void ensureValid(MCSection &Sec) const;
// Get the offset of the given fragment inside its containing section.
- uint64_t getFragmentOffset(const MCFragment &F) const { return F.Offset; }
+ uint64_t getFragmentOffset(const MCFragment &F) const;
uint64_t getSectionAddressSize(const MCSection &Sec) const;
uint64_t getSectionFileSize(const MCSection &Sec) const;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 1289d6f6f9f65..dcdcd094fa17b 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -99,6 +99,8 @@ class MCSection {
/// Whether this section has had instructions emitted into it.
bool HasInstructions : 1;
+ bool HasLayout : 1;
+
bool IsRegistered : 1;
bool IsText : 1;
@@ -167,6 +169,9 @@ class MCSection {
bool hasInstructions() const { return HasInstructions; }
void setHasInstructions(bool Value) { HasInstructions = Value; }
+ bool hasLayout() const { return HasLayout; }
+ void setHasLayout(bool Value) { HasLayout = Value; }
+
bool isRegistered() const { return IsRegistered; }
void setIsRegistered(bool Value) { IsRegistered = Value; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index f6a3182a7868c..cbeb41f58c99b 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -428,6 +428,28 @@ void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
DF->Offset = EF->Offset;
}
+void MCAssembler::ensureValid(MCSection &Sec) const {
+ if (Sec.hasLayout())
+ return;
+ Sec.setHasLayout(true);
+ MCFragment *Prev = nullptr;
+ uint64_t Offset = 0;
+ for (MCFragment &F : Sec) {
+ F.Offset = Offset;
+ if (isBundlingEnabled() && F.hasInstructions()) {
+ layoutBundle(Prev, &F);
+ Offset = F.Offset;
+ }
+ Offset += computeFragmentSize(F);
+ Prev = &F;
+ }
+}
+
+uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
+ ensureValid(*F.getParent());
+ return F.Offset;
+}
+
// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
bool ReportError, uint64_t &Val) {
@@ -907,20 +929,22 @@ void MCAssembler::layout() {
// Layout until everything fits.
this->HasLayout = true;
- for (MCSection &Sec : *this)
- layoutSection(Sec);
while (layoutOnce()) {
+ if (getContext().hadError())
+ return;
+ // Size of fragments in one section can depend on the size of fragments in
+ // another. If any fragment has changed size, we have to re-layout (and
+ // as a result possibly further relax) all.
+ for (MCSection &Sec : *this)
+ Sec.setHasLayout(false);
}
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - post-relaxation\n--\n";
dump(); });
- // Some targets might want to adjust fragment offsets. If so, perform another
- // layout loop.
- if (getBackend().finishLayout(*this))
- for (MCSection &Sec : *this)
- layoutSection(Sec);
+ // Finalize the layout, including fragment lowering.
+ getBackend().finishLayout(*this);
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - final-layout\n--\n";
@@ -1273,42 +1297,15 @@ bool MCAssembler::relaxFragment(MCFragment &F) {
}
}
-void MCAssembler::layoutSection(MCSection &Sec) {
- MCFragment *Prev = nullptr;
- uint64_t Offset = 0;
- for (MCFragment &F : Sec) {
- F.Offset = Offset;
- if (LLVM_UNLIKELY(isBundlingEnabled())) {
- if (F.hasInstructions()) {
- layoutBundle(Prev, &F);
- Offset = F.Offset;
- }
- Prev = &F;
- }
- Offset += computeFragmentSize(F);
- }
-}
-
bool MCAssembler::layoutOnce() {
++stats::RelaxationSteps;
- // Size of fragments in one section can depend on the size of fragments in
- // another. If any fragment has changed size, we have to re-layout (and
- // as a result possibly further relax) all.
- bool ChangedAny = false;
- for (MCSection &Sec : *this) {
- for (;;) {
- bool Changed = false;
- for (MCFragment &F : Sec)
- if (relaxFragment(F))
- Changed = true;
- ChangedAny |= Changed;
- if (!Changed)
- break;
- layoutSection(Sec);
- }
- }
- return ChangedAny;
+ bool Changed = false;
+ for (MCSection &Sec : *this)
+ for (MCFragment &Frag : Sec)
+ if (relaxFragment(Frag))
+ Changed = true;
+ return Changed;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
diff --git a/llvm/lib/MC/MCSection.cpp b/llvm/lib/MC/MCSection.cpp
index 97e87a41c8ce5..8c2ee5635a49c 100644
--- a/llvm/lib/MC/MCSection.cpp
+++ b/llvm/lib/MC/MCSection.cpp
@@ -23,8 +23,8 @@ using namespace llvm;
MCSection::MCSection(SectionVariant V, StringRef Name, bool IsText,
bool IsVirtual, MCSymbol *Begin)
: Begin(Begin), BundleGroupBeforeFirstInst(false), HasInstructions(false),
- IsRegistered(false), IsText(IsText), IsVirtual(IsVirtual), Name(Name),
- Variant(V) {
+ HasLayout(false), IsRegistered(false), IsText(IsText),
+ IsVirtual(IsVirtual), Name(Name), Variant(V) {
DummyFragment.setParent(this);
// The initial subsection number is 0. Create a fragment list.
CurFragList = &Subsections.emplace_back(0u, FragList{}).second;
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 1570493b765ca..6acc37e599f2e 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -702,7 +702,7 @@ class HexagonAsmBackend : public MCAsmBackend {
return true;
}
- bool finishLayout(const MCAssembler &Asm) const override {
+ void finishLayout(MCAssembler const &Asm) const override {
SmallVector<MCFragment *> Frags;
for (MCSection &Sec : Asm) {
Frags.clear();
@@ -747,6 +747,7 @@ class HexagonAsmBackend : public MCAsmBackend {
//assert(!Error);
(void)Error;
ReplaceInstruction(Asm.getEmitter(), RF, Inst);
+ Sec.setHasLayout(false);
Size = 0; // Only look back one instruction
break;
}
@@ -756,7 +757,6 @@ class HexagonAsmBackend : public MCAsmBackend {
}
}
}
- return true;
}
}; // class HexagonAsmBackend
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index da9fc25779294..cf0cb92424c16 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -201,7 +201,7 @@ class X86AsmBackend : public MCAsmBackend {
bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
unsigned &RemainingSize) const;
- bool finishLayout(const MCAssembler &Asm) const override;
+ void finishLayout(const MCAssembler &Asm) const override;
unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
@@ -854,7 +854,7 @@ bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
return Changed;
}
-bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
+void X86AsmBackend::finishLayout(MCAssembler const &Asm) const {
// See if we can further relax some instructions to cut down on the number of
// nop bytes required for code alignment. The actual win is in reducing
// instruction count, not number of bytes. Modern X86-64 can easily end up
@@ -862,7 +862,7 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
// (i.e. eliminate nops) even at the cost of increasing the size and
// complexity of others.
if (!X86PadForAlign && !X86PadForBranchAlign)
- return false;
+ return;
// The processed regions are delimitered by LabeledFragments. -g may have more
MCSymbols and therefore different relaxation results. X86PadForAlign is
@@ -907,6 +907,9 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
continue;
}
+#ifndef NDEBUG
+ const uint64_t OrigOffset = Asm.getFragmentOffset(F);
+#endif
const uint64_t OrigSize = Asm.computeFragmentSize(F);
// To keep the effects local, prefer to relax instructions closest to
@@ -919,7 +922,8 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
// Give the backend a chance to play any tricks it wishes to increase
// the encoding size of the given instruction. Target independent code
// will try further relaxation, but target's may play further tricks.
- padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize);
+ if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
+ Sec.setHasLayout(false);
// If we have an instruction which hasn't been fully relaxed, we can't
// skip past it and insert bytes before it. Changing its starting
@@ -936,6 +940,14 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
if (F.getKind() == MCFragment::FT_BoundaryAlign)
cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
+#ifndef NDEBUG
+ const uint64_t FinalOffset = Asm.getFragmentOffset(F);
+ const uint64_t FinalSize = Asm.computeFragmentSize(F);
+ assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
+ "can't move start of next fragment!");
+ assert(FinalSize == RemainingSize && "inconsistent size computation?");
+#endif
+
// If we're looking at a boundary align, make sure we don't try to pad
// its target instructions for some following directive. Doing so would
// break the alignment of the current boundary align.
@@ -949,7 +961,11 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
}
}
- return true;
+ // The layout is done. Mark every fragment as valid.
+ for (MCSection &Section : Asm) {
+ Asm.getFragmentOffset(*Section.curFragList()->Tail);
+ Asm.computeFragmentSize(*Section.curFragList()->Tail);
+ }
}
unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {