[llvm] fd6d6a7 - MC: Refactor FT_Align fragments when linker relaxation is enabled
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Sun Jul 20 00:55:57 PDT 2025
Author: Fangrui Song
Date: 2025-07-20T00:55:54-07:00
New Revision: fd6d6a7c8d4e2fb196bd3707dc4022a236089d9a
URL: https://github.com/llvm/llvm-project/commit/fd6d6a7c8d4e2fb196bd3707dc4022a236089d9a
DIFF: https://github.com/llvm/llvm-project/commit/fd6d6a7c8d4e2fb196bd3707dc4022a236089d9a.diff
LOG: MC: Refactor FT_Align fragments when linker relaxation is enabled
Previously, two MCAsmBackend hooks (shouldInsertExtraNopBytesForCodeAlign and
shouldInsertFixupForCodeAlign) were used, with shouldInsertFixupForCodeAlign
calling getWriter().recordRelocation directly and bypassing the generic
relocation-recording code.
This patch:
* Introduces MCAsmBackend::relaxAlign to replace the two hooks.
* Tracks padding size using VarContentEnd (content is ignored).
* Moves setLinkerRelaxable from MCObjectStreamer::emitCodeAlignment to the backends.
Pull Request: https://github.com/llvm/llvm-project/pull/149465
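As a quick orientation before the diff: a backend that opts into the new hook
computes the worst-case padding, records a fixup on the fragment's variable
tail, and marks the section linker-relaxable. The following is a minimal
sketch distilled from the RISC-V implementation in this commit; MyAsmBackend,
MyTarget::FeatureRelax, and R_MY_ALIGN are illustrative placeholders, not real
LLVM identifiers.

// Minimal sketch of a backend override of the new MCAsmBackend::relaxAlign
// hook, modeled on RISCVAsmBackend::relaxAlign below. "MyAsmBackend",
// "MyTarget::FeatureRelax", and "R_MY_ALIGN" are illustrative placeholders.
bool MyAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
  // Use default handling unless linker relaxation is enabled and the
  // alignment is larger than the minimum nop size.
  if (!F.getSubtargetInfo()->hasFeature(MyTarget::FeatureRelax))
    return false;
  const unsigned MinNopLen = 4;
  if (F.getAlignment() <= MinNopLen)
    return false;

  // Report Alignment - MinNopLen bytes of nop padding; the linker deletes the
  // excess, guided by an R_*_ALIGN-style relocation on the variable tail.
  Size = F.getAlignment().value() - MinNopLen;
  auto *Expr = MCConstantExpr::create(Size, getContext());
  MCFixup Fixup =
      MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_MY_ALIGN);
  F.setVarFixups({Fixup});
  F.getParent()->setLinkerRelaxable();
  return true;
}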
Added:
Modified:
llvm/include/llvm/MC/MCAsmBackend.h
llvm/lib/MC/MCAssembler.cpp
llvm/lib/MC/MCExpr.cpp
llvm/lib/MC/MCFragment.cpp
llvm/lib/MC/MCObjectStreamer.cpp
llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
llvm/test/MC/RISCV/Relocations/mc-dump.s
Removed:
################################################################################
diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index c69fcec586bdf..bfc117578d363 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -106,20 +106,6 @@ class LLVM_ABI MCAsmBackend {
/// Get information on a fixup kind.
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const;
- /// Hook to check if extra nop bytes must be inserted for alignment directive.
- /// For some targets this may be necessary in order to support linker
- /// relaxation. The number of bytes to insert are returned in Size.
- virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
- unsigned &Size) {
- return false;
- }
-
- /// Hook which indicates if the target requires a fixup to be generated when
- /// handling an align directive in an executable section
- virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) {
- return false;
- }
-
// Evaluate a fixup, returning std::nullopt to use default handling for
// `Value` and `IsResolved`. Otherwise, returns `IsResolved` with the
// expectation that the hook updates `Value`.
@@ -177,6 +163,10 @@ class LLVM_ABI MCAsmBackend {
}
// Defined by linker relaxation targets.
+
+ // Return false to use default handling. Otherwise, set `Size` to the number
+ // of padding bytes.
+ virtual bool relaxAlign(MCFragment &F, unsigned &Size) { return false; }
virtual bool relaxDwarfLineAddr(MCFragment &, bool &WasRelaxed) const {
return false;
}
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 3ab402a8e2832..674adc92257c9 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -196,6 +196,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
+ case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
@@ -226,27 +227,6 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
case MCFragment::FT_SymbolId:
return 4;
- case MCFragment::FT_Align: {
- unsigned Offset = F.Offset + F.getFixedSize();
- unsigned Size = offsetToAlignment(Offset, F.getAlignment());
-
- // Insert extra Nops for code alignment if the target define
- // shouldInsertExtraNopBytesForCodeAlign target hook.
- if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
- getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
- return F.getFixedSize() + Size;
-
- // If we are padding with nops, force the padding to be larger than the
- // minimum nop size.
- if (Size > 0 && F.hasAlignEmitNops()) {
- while (Size % getBackend().getMinimumNopSize())
- Size += F.getAlignment().value();
- }
- if (Size > F.getAlignMaxBytesToEmit())
- Size = 0;
- return F.getFixedSize() + Size;
- }
-
case MCFragment::FT_Org: {
const MCOrgFragment &OF = cast<MCOrgFragment>(F);
MCValue Value;
@@ -418,7 +398,6 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
switch (F.getKind()) {
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
- case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
@@ -431,42 +410,41 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
const auto &EF = cast<MCFragment>(F);
OS << StringRef(EF.getContents().data(), EF.getContents().size());
OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
- if (F.getKind() == MCFragment::FT_Align) {
- ++stats::EmittedAlignFragments;
- assert(F.getAlignFillLen() &&
- "Invalid virtual align in concrete fragment!");
-
- uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
- assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
- "computeFragmentSize computed size is incorrect");
-
- // See if we are aligning with nops, and if so do that first to try to
- // fill the Count bytes. Then if that did not fill any bytes or there are
- // any bytes left to fill use the Value and ValueSize to fill the rest. If
- // we are aligning with nops, ask that target to emit the right data.
- if (F.hasAlignEmitNops()) {
- if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
- report_fatal_error("unable to write nop sequence of " + Twine(Count) +
- " bytes");
- } else {
- // Otherwise, write out in multiples of the value size.
- for (uint64_t i = 0; i != Count; ++i) {
- switch (F.getAlignFillLen()) {
- default:
- llvm_unreachable("Invalid size!");
- case 1:
- OS << char(F.getAlignFill());
- break;
- case 2:
- support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
- break;
- case 4:
- support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
- break;
- case 8:
- support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
- break;
- }
+ } break;
+
+ case MCFragment::FT_Align: {
+ ++stats::EmittedAlignFragments;
+ OS << StringRef(F.getContents().data(), F.getContents().size());
+ assert(F.getAlignFillLen() &&
+ "Invalid virtual align in concrete fragment!");
+
+ uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
+ assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
+ "computeFragmentSize computed size is incorrect");
+
+ // In the nops mode, call the backend hook to write `Count` nops.
+ if (F.hasAlignEmitNops()) {
+ if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
+ reportFatalInternalError("unable to write nop sequence of " +
+ Twine(Count) + " bytes");
+ } else {
+ // Otherwise, write out in multiples of the value size.
+ for (uint64_t i = 0; i != Count; ++i) {
+ switch (F.getAlignFillLen()) {
+ default:
+ llvm_unreachable("Invalid size!");
+ case 1:
+ OS << char(F.getAlignFill());
+ break;
+ case 2:
+ support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
+ break;
+ case 4:
+ support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
+ break;
+ case 8:
+ support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
+ break;
}
}
}
@@ -720,11 +698,6 @@ void MCAssembler::layout() {
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
- } else if (F.getKind() == MCFragment::FT_Align) {
- // For RISC-V linker relaxation, an alignment relocation might be
- // needed.
- if (F.hasAlignEmitNops())
- getBackend().shouldInsertFixupForCodeAlign(*this, F);
}
}
}
@@ -975,7 +948,32 @@ void MCAssembler::layoutSection(MCSection &Sec) {
uint64_t Offset = 0;
for (MCFragment &F : Sec) {
F.Offset = Offset;
- Offset += computeFragmentSize(F);
+ if (F.getKind() == MCFragment::FT_Align) {
+ Offset += F.getFixedSize();
+ unsigned Size = offsetToAlignment(Offset, F.getAlignment());
+ // In the nops mode, RISC-V style linker relaxation might adjust the size
+ // and add a fixup, even if `Size` is originally 0.
+ bool AlignFixup = false;
+ if (F.hasAlignEmitNops()) {
+ AlignFixup = getBackend().relaxAlign(F, Size);
+ // If the backend does not handle the fragment specially, pad with nops,
+ // but ensure that the padding is larger than the minimum nop size.
+ if (!AlignFixup)
+ while (Size % getBackend().getMinimumNopSize())
+ Size += F.getAlignment().value();
+ }
+ if (!AlignFixup && Size > F.getAlignMaxBytesToEmit())
+ Size = 0;
+ // Update the variable tail size. The content is ignored.
+ assert(F.VarContentStart == 0 &&
+ "VarContentStart should not be modified");
+ F.VarContentEnd = Size;
+ if (F.VarContentEnd > F.getParent()->ContentStorage.size())
+ F.getParent()->ContentStorage.resize(F.VarContentEnd);
+ Offset += Size;
+ } else {
+ Offset += computeFragmentSize(F);
+ }
}
}
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index f0f1bd485258f..dbb2fd16eb2e5 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -370,7 +370,6 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
}
int64_t Num;
- unsigned Count;
if (DF) {
Displacement += DF->getContents().size();
} else if (F->getKind() == MCFragment::FT_Relaxable &&
@@ -380,9 +379,7 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
// data fragment.
Displacement += F->getSize();
} else if (F->getKind() == MCFragment::FT_Align && Layout &&
- F->hasAlignEmitNops() &&
- !Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
- *F, Count)) {
+ F->isLinkerRelaxable()) {
Displacement += Asm->computeFragmentSize(*F);
} else if (auto *FF = dyn_cast<MCFillFragment>(F);
FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp
index 569f2a5652869..3c395e5ccdb0b 100644
--- a/llvm/lib/MC/MCFragment.cpp
+++ b/llvm/lib/MC/MCFragment.cpp
@@ -83,8 +83,13 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
auto Fixed = getContents();
auto Var = getVarContents();
OS << " Size:" << Fixed.size();
- if (getKind() != MCFragment::FT_Data)
+ if (getKind() != MCFragment::FT_Data) {
OS << '+' << Var.size();
+ // FT_Align uses getVarContents to track the size, but the content is
+ // ignored and not useful.
+ if (getKind() == MCFragment::FT_Align)
+ Var = {};
+ }
OS << " [";
for (unsigned i = 0, e = Fixed.size(); i != e; ++i) {
if (i) OS << ",";
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index af47df2967bb3..83e447abb7e06 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -535,14 +535,6 @@ void MCObjectStreamer::emitCodeAlignment(Align Alignment,
emitValueToAlignment(Alignment, 0, 1, MaxBytesToEmit);
F->u.align.EmitNops = true;
F->STI = STI;
-
- // With RISC-V style linker relaxation, mark the section as linker-relaxable
- // if the alignment is larger than the minimum NOP size.
- unsigned Size;
- if (getAssembler().getBackend().shouldInsertExtraNopBytesForCodeAlign(*F,
- Size)) {
- F->getParent()->setLinkerRelaxable();
- }
}
void MCObjectStreamer::emitValueToOffset(const MCExpr *Offset,
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index 032bfea71140f..8fa72bc9a30a7 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -177,74 +177,6 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
}
}
-// Linker relaxation may change code size. We have to insert Nops
-// for .align directive when linker relaxation enabled. So then Linker
-// could satisfy alignment by removing Nops.
-// The function returns the total Nops Size we need to insert.
-bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
- const MCFragment &AF, unsigned &Size) {
- // Calculate Nops Size only when linker relaxation enabled.
- if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
- return false;
-
- // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
- const unsigned MinNopLen = 4;
- if (AF.getAlignMaxBytesToEmit() < MinNopLen)
- return false;
- Size = AF.getAlignment().value() - MinNopLen;
- return AF.getAlignment() > MinNopLen;
-}
-
-// We need to insert R_LARCH_ALIGN relocation type to indicate the
-// position of Nops and the total bytes of the Nops have been inserted
-// when linker relaxation enabled.
-// The function inserts fixup_loongarch_align fixup which eventually will
-// transfer to R_LARCH_ALIGN relocation type.
-// The improved R_LARCH_ALIGN requires symbol index. The lowest 8 bits of
-// addend represent alignment and the other bits of addend represent the
-// maximum number of bytes to emit. The maximum number of bytes is zero
-// means ignore the emit limit.
-bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
- MCFragment &AF) {
- // Insert the fixup only when linker relaxation enabled.
- if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
- return false;
-
- // Calculate total Nops we need to insert. If there are none to insert
- // then simply return.
- unsigned InsertedNopBytes;
- if (!shouldInsertExtraNopBytesForCodeAlign(AF, InsertedNopBytes))
- return false;
-
- MCSection *Sec = AF.getParent();
- MCContext &Ctx = getContext();
- const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
- MCFixup Fixup = MCFixup::create(AF.getFixedSize(), Dummy, ELF::R_LARCH_ALIGN);
- unsigned MaxBytesToEmit = AF.getAlignMaxBytesToEmit();
-
- auto createExtendedValue = [&]() {
- const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
- if (MCSym == nullptr) {
- // Define a marker symbol at the section with an offset of 0.
- MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
- Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
- Asm.registerSymbol(*Sym);
- MCSym = MCSymbolRefExpr::create(Sym, Ctx);
- getSecToAlignSym()[Sec] = MCSym;
- }
- return MCValue::get(&MCSym->getSymbol(), nullptr,
- MaxBytesToEmit << 8 | Log2(AF.getAlignment()));
- };
-
- uint64_t FixedValue = 0;
- MCValue Value = MaxBytesToEmit >= InsertedNopBytes
- ? MCValue::get(InsertedNopBytes)
- : createExtendedValue();
- Asm.getWriter().recordRelocation(AF, Fixup, Value, FixedValue);
-
- return true;
-}
-
bool LoongArchAsmBackend::shouldForceRelocation(const MCFixup &Fixup,
const MCValue &Target) {
switch (Fixup.getKind()) {
@@ -279,6 +211,53 @@ getRelocPairForSize(unsigned Size) {
}
}
+// Check if an R_LARCH_ALIGN relocation is needed for an alignment directive.
+// If conditions are met, compute the padding size and create a fixup encoding
+// the padding size in the addend. If MaxBytesToEmit is smaller than the padding
+// size, the fixup encodes MaxBytesToEmit in the higher bits and references a
+// per-section marker symbol.
+bool LoongArchAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
+ // Use default handling unless linker relaxation is enabled and the
+ // MaxBytesToEmit >= the nop size.
+ if (!F.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
+ return false;
+ const unsigned MinNopLen = 4;
+ unsigned MaxBytesToEmit = F.getAlignMaxBytesToEmit();
+ if (MaxBytesToEmit < MinNopLen)
+ return false;
+
+ Size = F.getAlignment().value() - MinNopLen;
+ if (F.getAlignment() <= MinNopLen)
+ return false;
+
+ MCContext &Ctx = getContext();
+ const MCExpr *Expr = nullptr;
+ if (MaxBytesToEmit >= Size) {
+ Expr = MCConstantExpr::create(Size, getContext());
+ } else {
+ MCSection *Sec = F.getParent();
+ const MCSymbolRefExpr *SymRef = getSecToAlignSym()[Sec];
+ if (SymRef == nullptr) {
+ // Define a marker symbol at the section with an offset of 0.
+ MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
+ Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
+ Asm->registerSymbol(*Sym);
+ SymRef = MCSymbolRefExpr::create(Sym, Ctx);
+ getSecToAlignSym()[Sec] = SymRef;
+ }
+ Expr = MCBinaryExpr::createAdd(
+ SymRef,
+ MCConstantExpr::create((MaxBytesToEmit << 8) | Log2(F.getAlignment()),
+ Ctx),
+ Ctx);
+ }
+ MCFixup Fixup =
+ MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_LARCH_ALIGN);
+ F.setVarFixups({Fixup});
+ F.getParent()->setLinkerRelaxable();
+ return true;
+}
+
std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCFragment &F,
int64_t &Value) const {
const MCExpr &Expr = F.getLEBValue();
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index 793e4093b1c9e..3d929fc49f95e 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -45,19 +45,13 @@ class LoongArchAsmBackend : public MCAsmBackend {
MutableArrayRef<char> Data, uint64_t Value,
bool IsResolved) override;
- // Return Size with extra Nop Bytes for alignment directive in code section.
- bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
- unsigned &Size) override;
-
- // Insert target specific fixup type for alignment directive in code section.
- bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
-
bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target);
std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
+ bool relaxAlign(MCFragment &F, unsigned &Size) override;
bool relaxDwarfLineAddr(MCFragment &F, bool &WasRelaxed) const override;
bool relaxDwarfCFA(MCFragment &F, bool &WasRelaxed) const override;
std::pair<bool, bool> relaxLEB128(MCFragment &F,
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 6bc313656f7c1..2c37c3bfd0fe3 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -302,6 +302,28 @@ void RISCVAsmBackend::relaxInstruction(MCInst &Inst,
Inst = std::move(Res);
}
+// Check if an R_RISCV_ALIGN relocation is needed for an alignment directive.
+// If conditions are met, compute the padding size and create a fixup encoding
+// the padding size in the addend.
+bool RISCVAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
+ // Use default handling unless linker relaxation is enabled and the alignment
+ // is larger than the nop size.
+ const MCSubtargetInfo *STI = F.getSubtargetInfo();
+ if (!STI->hasFeature(RISCV::FeatureRelax))
+ return false;
+ unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
+ if (F.getAlignment() <= MinNopLen)
+ return false;
+
+ Size = F.getAlignment().value() - MinNopLen;
+ auto *Expr = MCConstantExpr::create(Size, getContext());
+ MCFixup Fixup =
+ MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_RISCV_ALIGN);
+ F.setVarFixups({Fixup});
+ F.getParent()->setLinkerRelaxable();
+ return true;
+}
+
bool RISCVAsmBackend::relaxDwarfLineAddr(MCFragment &F,
bool &WasRelaxed) const {
MCContext &C = getContext();
@@ -887,55 +909,6 @@ void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
}
}
-// Linker relaxation may change code size. We have to insert Nops
-// for .align directive when linker relaxation enabled. So then Linker
-// could satisfy alignment by removing Nops.
-// The function return the total Nops Size we need to insert.
-bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
- const MCFragment &AF, unsigned &Size) {
- // Calculate Nops Size only when linker relaxation enabled.
- const MCSubtargetInfo *STI = AF.getSubtargetInfo();
- if (!STI->hasFeature(RISCV::FeatureRelax))
- return false;
-
- unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
-
- if (AF.getAlignment() <= MinNopLen) {
- return false;
- } else {
- Size = AF.getAlignment().value() - MinNopLen;
- return true;
- }
-}
-
-// We need to insert R_RISCV_ALIGN relocation type to indicate the
-// position of Nops and the total bytes of the Nops have been inserted
-// when linker relaxation enabled.
-// The function insert fixup_riscv_align fixup which eventually will
-// transfer to R_RISCV_ALIGN relocation type.
-bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
- MCFragment &AF) {
- // Insert the fixup only when linker relaxation enabled.
- const MCSubtargetInfo *STI = AF.getSubtargetInfo();
- if (!STI->hasFeature(RISCV::FeatureRelax))
- return false;
-
- // Calculate total Nops we need to insert. If there are none to insert
- // then simply return.
- unsigned Count;
- if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count) || (Count == 0))
- return false;
-
- MCContext &Ctx = getContext();
- const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
- MCFixup Fixup = MCFixup::create(AF.getFixedSize(), Dummy, ELF::R_RISCV_ALIGN);
-
- uint64_t FixedValue = 0;
- MCValue NopBytes = MCValue::get(Count);
- Asm.getWriter().recordRelocation(AF, Fixup, NopBytes, FixedValue);
- return true;
-}
-
std::unique_ptr<MCObjectTargetWriter>
RISCVAsmBackend::createObjectTargetWriter() const {
return createRISCVELFObjectWriter(OSABI, Is64Bit);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
index c4a1c74aa2c54..d97d63204e7e4 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -38,13 +38,6 @@ class RISCVAsmBackend : public MCAsmBackend {
const MCTargetOptions &Options);
~RISCVAsmBackend() override = default;
- // Return Size with extra Nop Bytes for alignment directive in code section.
- bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
- unsigned &Size) override;
-
- // Insert target specific fixup type for alignment directive in code section.
- bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
-
std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
uint64_t &) override;
bool addReloc(const MCFragment &, const MCFixup &, const MCValue &,
@@ -72,6 +65,7 @@ class RISCVAsmBackend : public MCAsmBackend {
void relaxInstruction(MCInst &Inst,
const MCSubtargetInfo &STI) const override;
+ bool relaxAlign(MCFragment &F, unsigned &Size) override;
bool relaxDwarfLineAddr(MCFragment &F, bool &WasRelaxed) const override;
bool relaxDwarfCFA(MCFragment &F, bool &WasRelaxed) const override;
std::pair<bool, bool> relaxLEB128(MCFragment &LF,
diff --git a/llvm/test/MC/RISCV/Relocations/mc-dump.s b/llvm/test/MC/RISCV/Relocations/mc-dump.s
index 842851ce04843..e8f4b14ce3725 100644
--- a/llvm/test/MC/RISCV/Relocations/mc-dump.s
+++ b/llvm/test/MC/RISCV/Relocations/mc-dump.s
@@ -9,10 +9,12 @@
# CHECK-NEXT:0 Data LinkerRelaxable Size:8 [97,00,00,00,e7,80,00,00]
# CHECK-NEXT: Fixup @0 Value:specifier(19,ext) Kind:4023
# CHECK-NEXT: Symbol @0 $x
-# CHECK-NEXT:8 Align Size:0+0 []
+# CHECK-NEXT:8 Align Size:0+4 []
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT:12 Align Size:4+0 [13,05,30,00]
+# CHECK-NEXT: Fixup @0 Value:4 Kind:[[#]]
+# CHECK-NEXT:12 Align Size:4+4 [13,05,30,00]
# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
+# CHECK-NEXT: Fixup @4 Value:4 Kind:[[#]]
# CHECK-NEXT:]
call ext