[llvm] 07d2709 - Revert "[MC] Compute fragment offsets eagerly"
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 31 00:07:34 PDT 2024
Author: Nikita Popov
Date: 2024-07-31T09:06:43+02:00
New Revision: 07d2709a17860a202d91781769a88837e4fb5f2a
URL: https://github.com/llvm/llvm-project/commit/07d2709a17860a202d91781769a88837e4fb5f2a
DIFF: https://github.com/llvm/llvm-project/commit/07d2709a17860a202d91781769a88837e4fb5f2a.diff
LOG: Revert "[MC] Compute fragment offsets eagerly"
This reverts commit be5a845e4c29aadb513ae6e5e2879dccf37efdbb.
This causes large code size regressions, which were not present
in the initial version of this change.
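For readers following the mechanics: the reverted commit computed every
fragment offset eagerly during relaxation, while the code restored below
computes offsets lazily, memoized behind a per-section HasLayout flag. A
minimal standalone sketch of that pattern (simplified stand-in types, not
the real MCAssembler/MCSection API):

#include <cstdint>
#include <vector>

struct Fragment {
  uint64_t Size = 0;   // size after the most recent relaxation
  uint64_t Offset = 0; // cached offset within the section
};

struct Section {
  std::vector<Fragment> Fragments;
  bool HasLayout = false; // true while the cached offsets are valid

  // Analogous to MCAssembler::ensureValid: assign offsets in one walk,
  // but only when the cache has been invalidated.
  void ensureValid() {
    if (HasLayout)
      return;
    HasLayout = true;
    uint64_t Offset = 0;
    for (Fragment &F : Fragments) {
      F.Offset = Offset;
      Offset += F.Size;
    }
  }

  // Analogous to the restored MCAssembler::getFragmentOffset.
  uint64_t getFragmentOffset(size_t Idx) {
    ensureValid();
    return Fragments[Idx].Offset;
  }
};

int main() {
  Section Sec;
  Sec.Fragments = {{4}, {2}, {8}};
  uint64_t Off = Sec.getFragmentOffset(2); // first query lays out: 6
  Sec.Fragments[0].Size = 6;               // a fragment grew during relaxation
  Sec.HasLayout = false;                   // like Sec.setHasLayout(false)
  Off = Sec.getFragmentOffset(2);          // recomputed lazily: 8
  return Off == 8 ? 0 : 1;
}

The flag makes a later size change cost only one linear re-walk of the
section, performed on the next offset query rather than immediately.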
Added:
Modified:
llvm/include/llvm/MC/MCAsmBackend.h
llvm/include/llvm/MC/MCAssembler.h
llvm/include/llvm/MC/MCSection.h
llvm/lib/MC/MCAssembler.cpp
llvm/lib/MC/MCSection.cpp
llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
llvm/test/MC/ELF/relax-recompute-align.s
Removed:
################################################################################
diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index d1d1814dd8b52f..3f88ac02cd92ac 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -217,9 +217,8 @@ class MCAsmBackend {
virtual bool writeNopData(raw_ostream &OS, uint64_t Count,
const MCSubtargetInfo *STI) const = 0;
- // Return true if fragment offsets have been adjusted and an extra layout
- // iteration is needed.
- virtual bool finishLayout(const MCAssembler &Asm) const { return false; }
+ /// Give backend an opportunity to finish layout after relaxation
+ virtual void finishLayout(MCAssembler const &Asm) const {}
/// Handle any target-specific assembler flags. By default, do nothing.
virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 49e2e1e776d267..c6fa48128d1891 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -65,7 +65,6 @@ class MCAssembler {
bool HasLayout = false;
bool RelaxAll = false;
- unsigned RelaxSteps = 0;
SectionListType Sections;
@@ -114,18 +113,19 @@ class MCAssembler {
/// Perform one layout iteration and return true if any offsets
/// were adjusted.
- bool relaxOnce();
-
- /// Perform relaxation on a single fragment.
- void relaxFragment(MCFragment &F);
- void relaxInstruction(MCRelaxableFragment &IF);
- void relaxLEB(MCLEBFragment &IF);
- void relaxBoundaryAlign(MCBoundaryAlignFragment &BF);
- void relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF);
- void relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF);
- void relaxCVInlineLineTable(MCCVInlineLineTableFragment &DF);
- void relaxCVDefRange(MCCVDefRangeFragment &DF);
- void relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &DF);
+ bool layoutOnce();
+
+ /// Perform relaxation on a single fragment - returns true if the fragment
+ /// changes as a result of relaxation.
+ bool relaxFragment(MCFragment &F);
+ bool relaxInstruction(MCRelaxableFragment &IF);
+ bool relaxLEB(MCLEBFragment &IF);
+ bool relaxBoundaryAlign(MCBoundaryAlignFragment &BF);
+ bool relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF);
+ bool relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF);
+ bool relaxCVInlineLineTable(MCCVInlineLineTableFragment &DF);
+ bool relaxCVDefRange(MCCVDefRangeFragment &DF);
+ bool relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &DF);
std::tuple<MCValue, uint64_t, bool>
handleFixup(MCFragment &F, const MCFixup &Fixup, const MCSubtargetInfo *STI);
@@ -147,9 +147,10 @@ class MCAssembler {
uint64_t computeFragmentSize(const MCFragment &F) const;
void layoutBundle(MCFragment *Prev, MCFragment *F) const;
+ void ensureValid(MCSection &Sec) const;
// Get the offset of the given fragment inside its containing section.
- uint64_t getFragmentOffset(const MCFragment &F) const { return F.Offset; }
+ uint64_t getFragmentOffset(const MCFragment &F) const;
uint64_t getSectionAddressSize(const MCSection &Sec) const;
uint64_t getSectionFileSize(const MCSection &Sec) const;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 1289d6f6f9f655..dcdcd094fa17bd 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -99,6 +99,8 @@ class MCSection {
/// Whether this section has had instructions emitted into it.
bool HasInstructions : 1;
+ bool HasLayout : 1;
+
bool IsRegistered : 1;
bool IsText : 1;
@@ -167,6 +169,9 @@ class MCSection {
bool hasInstructions() const { return HasInstructions; }
void setHasInstructions(bool Value) { HasInstructions = Value; }
+ bool hasLayout() const { return HasLayout; }
+ void setHasLayout(bool Value) { HasLayout = Value; }
+
bool isRegistered() const { return IsRegistered; }
void setIsRegistered(bool Value) { IsRegistered = Value; }
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index fb27ef30a46647..cbeb41f58c99be 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -261,11 +261,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
}
int64_t Size = NumValues * FF.getValueSize();
if (Size < 0) {
- // The expression might use symbol values which have not yet converged.
- // Allow the first few iterations to have temporary negative values. The
- // limit is somewhat arbitrary but allows contrived interdependency.
- if (RelaxSteps >= 2)
- getContext().reportError(FF.getLoc(), "invalid number of bytes");
+ getContext().reportError(FF.getLoc(), "invalid number of bytes");
return 0;
}
return Size;
@@ -432,6 +428,28 @@ void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
DF->Offset = EF->Offset;
}
+void MCAssembler::ensureValid(MCSection &Sec) const {
+ if (Sec.hasLayout())
+ return;
+ Sec.setHasLayout(true);
+ MCFragment *Prev = nullptr;
+ uint64_t Offset = 0;
+ for (MCFragment &F : Sec) {
+ F.Offset = Offset;
+ if (isBundlingEnabled() && F.hasInstructions()) {
+ layoutBundle(Prev, &F);
+ Offset = F.Offset;
+ }
+ Offset += computeFragmentSize(F);
+ Prev = &F;
+ }
+}
+
+uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
+ ensureValid(*F.getParent());
+ return F.Offset;
+}
+
// Simple getSymbolOffset helper for the non-variable case.
static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
bool ReportError, uint64_t &Val) {
@@ -911,38 +929,22 @@ void MCAssembler::layout() {
// Layout until everything fits.
this->HasLayout = true;
- for (MCSection &Sec : *this) {
- MCFragment *Prev = nullptr;
- uint64_t Offset = 0;
- for (MCFragment &F : Sec) {
- F.Offset = Offset;
- if (LLVM_UNLIKELY(isBundlingEnabled())) {
- if (F.hasInstructions()) {
- layoutBundle(Prev, &F);
- Offset = F.Offset;
- }
- Prev = &F;
- }
- Offset += computeFragmentSize(F);
- }
- }
- while (relaxOnce())
+ while (layoutOnce()) {
if (getContext().hadError())
return;
+ // Size of fragments in one section can depend on the size of fragments in
+ // another. If any fragment has changed size, we have to re-layout (and
+ // as a result possibly further relax) all.
+ for (MCSection &Sec : *this)
+ Sec.setHasLayout(false);
+ }
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - post-relaxation\n--\n";
dump(); });
- // Some targets might want to adjust fragment offsets. If so, perform another
- // relaxation loop.
- if (getBackend().finishLayout(*this))
- while (relaxOnce())
- if (getContext().hadError())
- return;
-
- // Trigger computeFragmentSize errors.
- RelaxSteps = UINT_MAX;
+ // Finalize the layout, including fragment lowering.
+ getBackend().finishLayout(*this);
DEBUG_WITH_TYPE("mc-dump", {
errs() << "assembler backend - final-layout\n--\n";
@@ -1071,11 +1073,11 @@ bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
return false;
}
-void MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
+bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
assert(getEmitterPtr() &&
"Expected CodeEmitter defined for relaxInstruction");
if (!fragmentNeedsRelaxation(&F))
- return;
+ return false;
++stats::RelaxedInstructions;
@@ -1093,9 +1095,10 @@ void MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
F.getContents().clear();
getEmitter().encodeInstruction(Relaxed, F.getContents(), F.getFixups(),
*F.getSubtargetInfo());
+ return true;
}
-void MCAssembler::relaxLEB(MCLEBFragment &LF) {
+bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
unsigned PadTo = OldSize;
int64_t Value;
@@ -1131,6 +1134,7 @@ void MCAssembler::relaxLEB(MCLEBFragment &LF) {
encodeSLEB128(Value, OSE, PadTo);
else
encodeULEB128(Value, OSE, PadTo);
+ return OldSize != LF.getContents().size();
}
/// Check if the branch crosses the boundary.
@@ -1170,11 +1174,11 @@ static bool needPadding(uint64_t StartAddr, uint64_t Size,
isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
}
-void MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
+bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
// BoundaryAlignFragment that doesn't need to align any fragment should not be
// relaxed.
if (!BF.getLastFragment())
- return;
+ return false;
uint64_t AlignedOffset = getFragmentOffset(BF);
uint64_t AlignedSize = 0;
@@ -1188,15 +1192,19 @@ void MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
uint64_t NewSize = needPadding(AlignedOffset, AlignedSize, BoundaryAlignment)
? offsetToAlignment(AlignedOffset, BoundaryAlignment)
: 0U;
+ if (NewSize == BF.getSize())
+ return false;
BF.setSize(NewSize);
+ return true;
}
-void MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
+bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
bool WasRelaxed;
if (getBackend().relaxDwarfLineAddr(*this, DF, WasRelaxed))
- return;
+ return WasRelaxed;
MCContext &Context = getContext();
+ uint64_t OldSize = DF.getContents().size();
int64_t AddrDelta;
bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
assert(Abs && "We created a line delta with an invalid expression");
@@ -1209,12 +1217,13 @@ void MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
MCDwarfLineAddr::encode(Context, getDWARFLinetableParams(), LineDelta,
AddrDelta, Data);
+ return OldSize != Data.size();
}
-void MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
+bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
bool WasRelaxed;
if (getBackend().relaxDwarfCFA(*this, DF, WasRelaxed))
- return;
+ return WasRelaxed;
MCContext &Context = getContext();
int64_t Value;
@@ -1223,25 +1232,31 @@ void MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
getContext().reportError(DF.getAddrDelta().getLoc(),
"invalid CFI advance_loc expression");
DF.setAddrDelta(MCConstantExpr::create(0, Context));
- return;
+ return false;
}
SmallVectorImpl<char> &Data = DF.getContents();
+ uint64_t OldSize = Data.size();
Data.clear();
DF.getFixups().clear();
MCDwarfFrameEmitter::encodeAdvanceLoc(Context, Value, Data);
+ return OldSize != Data.size();
}
-void MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
+bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
+ unsigned OldSize = F.getContents().size();
getContext().getCVContext().encodeInlineLineTable(*this, F);
+ return OldSize != F.getContents().size();
}
-void MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
+bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
+ unsigned OldSize = F.getContents().size();
getContext().getCVContext().encodeDefRange(*this, F);
+ return OldSize != F.getContents().size();
}
-void MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
+bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
uint64_t OldSize = PF.getContents().size();
int64_t AddrDelta;
bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *this);
@@ -1254,12 +1269,13 @@ void MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
// AddrDelta is a signed integer
encodeSLEB128(AddrDelta, OSE, OldSize);
+ return OldSize != Data.size();
}
-void MCAssembler::relaxFragment(MCFragment &F) {
+bool MCAssembler::relaxFragment(MCFragment &F) {
switch(F.getKind()) {
default:
- return;
+ return false;
case MCFragment::FT_Relaxable:
assert(!getRelaxAll() &&
"Did not expect a MCRelaxableFragment in RelaxAll mode");
@@ -1281,57 +1297,15 @@ void MCAssembler::relaxFragment(MCFragment &F) {
}
}
-bool MCAssembler::relaxOnce() {
+bool MCAssembler::layoutOnce() {
++stats::RelaxationSteps;
- ++RelaxSteps;
-
- // Size of fragments in one section can depend on the size of fragments in
- // another. If any fragment has changed size, we have to re-layout (and
- // as a result possibly further relax) all sections.
- bool ChangedAny = false, Changed;
- for (MCSection &Sec : *this) {
- // Assume each iteration finalizes at least one extra fragment. If the
- // layout does not converge after N+1 iterations, bail out.
- auto MaxIter = Sec.curFragList()->Tail->getLayoutOrder() + 1;
- uint64_t OldSize = getSectionAddressSize(Sec);
- do {
- uint64_t Offset = 0;
- Changed = false;
- if (LLVM_UNLIKELY(isBundlingEnabled())) {
- MCFragment *Prev = nullptr;
- for (MCFragment &F : Sec) {
- F.Offset = Offset;
- relaxFragment(F);
- if (F.hasInstructions()) {
- layoutBundle(Prev, &F);
- Offset = F.Offset;
- }
- Prev = &F;
- if (F.Offset != Offset) {
- F.Offset = Offset;
- Changed = true;
- }
- Offset += computeFragmentSize(F);
- }
- } else {
- for (MCFragment &F : Sec) {
- if (F.Offset != Offset) {
- F.Offset = Offset;
- Changed = true;
- }
- relaxFragment(F);
- Offset += computeFragmentSize(F);
- }
- }
- Changed |= OldSize != Offset;
- ChangedAny |= Changed;
- OldSize = Offset;
- } while (Changed && --MaxIter);
- if (MaxIter == 0)
- return false;
- }
- return ChangedAny;
+ bool Changed = false;
+ for (MCSection &Sec : *this)
+ for (MCFragment &Frag : Sec)
+ if (relaxFragment(Frag))
+ Changed = true;
+ return Changed;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
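A hedged sketch of the restored fixed-point loop in MCAssembler::layout():
relax every fragment once per pass and repeat while anything changed,
invalidating all cached section layouts between passes (toy types and a
stand-in relaxFragment, not the real API):

#include <cstdint>
#include <vector>

struct Fragment {
  uint64_t Size;   // current encoded size
  uint64_t Target; // size the fragment must eventually reach
};

struct Section {
  std::vector<Fragment> Fragments;
  bool HasLayout = false;
};

// Stand-in for MCAssembler::relaxFragment: returns true if the fragment
// changed as a result of relaxation (here, a jump widening one byte).
static bool relaxFragment(Fragment &F) {
  if (F.Size >= F.Target)
    return false;
  ++F.Size;
  return true;
}

// Analogous to the restored MCAssembler::layoutOnce.
static bool layoutOnce(std::vector<Section> &Sections) {
  bool Changed = false;
  for (Section &Sec : Sections)
    for (Fragment &F : Sec.Fragments)
      Changed |= relaxFragment(F);
  return Changed;
}

int main() {
  Section A, B;
  A.Fragments = {{2, 5}, {1, 1}};
  B.Fragments = {{2, 2}};
  std::vector<Section> Sections = {A, B};
  // Sizes in one section can depend on offsets in another, so after any
  // change every section's cached layout must be thrown away.
  while (layoutOnce(Sections))
    for (Section &Sec : Sections)
      Sec.HasLayout = false; // mirrors Sec.setHasLayout(false)
  return Sections[0].Fragments[0].Size == 5 ? 0 : 1;
}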
diff --git a/llvm/lib/MC/MCSection.cpp b/llvm/lib/MC/MCSection.cpp
index 97e87a41c8ce5b..8c2ee5635a49c1 100644
--- a/llvm/lib/MC/MCSection.cpp
+++ b/llvm/lib/MC/MCSection.cpp
@@ -23,8 +23,8 @@ using namespace llvm;
MCSection::MCSection(SectionVariant V, StringRef Name, bool IsText,
bool IsVirtual, MCSymbol *Begin)
: Begin(Begin), BundleGroupBeforeFirstInst(false), HasInstructions(false),
- IsRegistered(false), IsText(IsText), IsVirtual(IsVirtual), Name(Name),
- Variant(V) {
+ HasLayout(false), IsRegistered(false), IsText(IsText),
+ IsVirtual(IsVirtual), Name(Name), Variant(V) {
DummyFragment.setParent(this);
// The initial subsection number is 0. Create a fragment list.
CurFragList = &Subsections.emplace_back(0u, FragList{}).second;
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 1570493b765ca8..6acc37e599f2ec 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -702,7 +702,7 @@ class HexagonAsmBackend : public MCAsmBackend {
return true;
}
- bool finishLayout(const MCAssembler &Asm) const override {
+ void finishLayout(MCAssembler const &Asm) const override {
SmallVector<MCFragment *> Frags;
for (MCSection &Sec : Asm) {
Frags.clear();
@@ -747,6 +747,7 @@ class HexagonAsmBackend : public MCAsmBackend {
//assert(!Error);
(void)Error;
ReplaceInstruction(Asm.getEmitter(), RF, Inst);
+ Sec.setHasLayout(false);
Size = 0; // Only look back one instruction
break;
}
@@ -756,7 +757,6 @@ class HexagonAsmBackend : public MCAsmBackend {
}
}
}
- return true;
}
}; // class HexagonAsmBackend
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 22711a7d05bf7a..cf0cb92424c166 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -201,7 +201,7 @@ class X86AsmBackend : public MCAsmBackend {
bool padInstructionEncoding(MCRelaxableFragment &RF, MCCodeEmitter &Emitter,
unsigned &RemainingSize) const;
- bool finishLayout(const MCAssembler &Asm) const override;
+ void finishLayout(const MCAssembler &Asm) const override;
unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
@@ -854,7 +854,7 @@ bool X86AsmBackend::padInstructionEncoding(MCRelaxableFragment &RF,
return Changed;
}
-bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
+void X86AsmBackend::finishLayout(MCAssembler const &Asm) const {
// See if we can further relax some instructions to cut down on the number of
// nop bytes required for code alignment. The actual win is in reducing
// instruction count, not number of bytes. Modern X86-64 can easily end up
@@ -862,7 +862,7 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
// (i.e. eliminate nops) even at the cost of increasing the size and
// complexity of others.
if (!X86PadForAlign && !X86PadForBranchAlign)
- return false;
+ return;
// The processed regions are delimited by LabeledFragments. -g may have more
// MCSymbols and therefore different relaxation results. X86PadForAlign is
@@ -871,7 +871,6 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
for (const MCSymbol &S : Asm.symbols())
LabeledFragments.insert(S.getFragment(false));
- bool Changed = false;
for (MCSection &Sec : Asm) {
if (!Sec.isText())
continue;
@@ -908,6 +907,9 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
continue;
}
+#ifndef NDEBUG
+ const uint64_t OrigOffset = Asm.getFragmentOffset(F);
+#endif
const uint64_t OrigSize = Asm.computeFragmentSize(F);
// To keep the effects local, prefer to relax instructions closest to
@@ -920,7 +922,8 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
// Give the backend a chance to play any tricks it wishes to increase
// the encoding size of the given instruction. Target independent code
// will try further relaxation, but targets may play further tricks.
- Changed |= padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize);
+ if (padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize))
+ Sec.setHasLayout(false);
// If we have an instruction which hasn't been fully relaxed, we can't
// skip past it and insert bytes before it. Changing its starting
@@ -933,12 +936,22 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
}
Relaxable.clear();
+ // BoundaryAlign explicitly tracks its size (unlike align)
+ if (F.getKind() == MCFragment::FT_BoundaryAlign)
+ cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
+
+#ifndef NDEBUG
+ const uint64_t FinalOffset = Asm.getFragmentOffset(F);
+ const uint64_t FinalSize = Asm.computeFragmentSize(F);
+ assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
+ "can't move start of next fragment!");
+ assert(FinalSize == RemainingSize && "inconsistent size computation?");
+#endif
+
// If we're looking at a boundary align, make sure we don't try to pad
// its target instructions for some following directive. Doing so would
// break the alignment of the current boundary align.
if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
- cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
- Changed = true;
const MCFragment *LastFragment = BF->getLastFragment();
if (!LastFragment)
continue;
@@ -948,7 +961,11 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
}
}
- return Changed;
+ // The layout is done. Mark every fragment as valid.
+ for (MCSection &Section : Asm) {
+ Asm.getFragmentOffset(*Section.curFragList()->Tail);
+ Asm.computeFragmentSize(*Section.curFragList()->Tail);
+ }
}
unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
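Both backend changes above follow the same restored contract: finishLayout
returns void, and a backend that resizes fragments afterwards signals it by
clearing the owning section's layout flag rather than requesting another
target-independent relaxation round. A simplified sketch of that contract
(hypothetical stand-in types, not the real MCAsmBackend interface):

#include <vector>

struct Section {
  bool HasLayout = true;
  void setHasLayout(bool V) { HasLayout = V; }
};

struct AsmBackend {
  virtual ~AsmBackend() = default;
  // Restored signature: void, not bool.
  virtual void finishLayout(std::vector<Section> &Sections) const {}
};

struct PaddingBackend : AsmBackend {
  void finishLayout(std::vector<Section> &Sections) const override {
    for (Section &Sec : Sections) {
      bool Padded = true; // pretend padding changed a fragment's size
      if (Padded)
        Sec.setHasLayout(false); // mirrors the X86/Hexagon pattern above
    }
  }
};

int main() {
  std::vector<Section> Sections(2);
  PaddingBackend B;
  B.finishLayout(Sections);
  return Sections[0].HasLayout ? 1 : 0; // cleared, so offsets get recomputed
}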
diff --git a/llvm/test/MC/ELF/relax-recompute-align.s b/llvm/test/MC/ELF/relax-recompute-align.s
index 508fb5969b3b50..44e1f1f9719970 100644
--- a/llvm/test/MC/ELF/relax-recompute-align.s
+++ b/llvm/test/MC/ELF/relax-recompute-align.s
@@ -1,14 +1,14 @@
// RUN: llvm-mc -filetype=obj -triple i386 %s -o - | llvm-objdump -d --no-show-raw-insn - | FileCheck %s
-// This is a case where llvm-mc computes a better layout than Darwin 'as'. This
+/// This is a case where the computed layout is not optimal. The
// issue is that after the first jmp slides, the .align size must be
// recomputed -- otherwise the second jump will appear to be out-of-range for a
// 1-byte jump.
// CHECK: int3
-// CHECK-NEXT: ce: int3
-// CHECK: d0: pushal
-// CHECK: 130: jl 0xd0
+// CHECK-NEXT: d2: int3
+// CHECK: e0: pushal
+// CHECK: 140: jl 0xe0
L0:
.space 0x8a, 0x90
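A toy illustration of the scenario this test exercises, assuming a
hypothetical 16-byte alignment and a toy reimplementation of LLVM's
offsetToAlignment: when the first jump widens from a 2-byte to a 5-byte
encoding, the padding the align directive must insert changes, moving every
label after it.

#include <cstdint>
#include <cstdio>

static uint64_t offsetToAlignment(uint64_t Offset, uint64_t Align) {
  return (Align - (Offset % Align)) % Align;
}

int main() {
  const uint64_t Code = 0x8a; // bytes emitted before the jump (.space 0x8a)
  std::printf("short jmp: %llu pad bytes\n",
              (unsigned long long)offsetToAlignment(Code + 2, 16));
  std::printf("near jmp:  %llu pad bytes\n",
              (unsigned long long)offsetToAlignment(Code + 5, 16));
  return 0;
}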