[llvm] [BOLT][AArch64] Partial support for lite mode (PR #133014)
Paschalis Mpeis via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 26 04:14:25 PDT 2025
================
@@ -1673,12 +1678,104 @@ bool BinaryFunction::scanExternalRefs() {
if (BranchTargetSymbol) {
BC.MIB->replaceBranchTarget(Instruction, BranchTargetSymbol,
Emitter.LocalCtx.get());
- } else if (!llvm::any_of(Instruction,
- [](const MCOperand &Op) { return Op.isExpr(); })) {
- // Skip assembly if the instruction may not have any symbolic operands.
- continue;
} else {
+ bool NeedsPatch = false;
analyzeInstructionForFuncReference(Instruction);
+ for (unsigned OpNum = 0; OpNum < MCPlus::getNumPrimeOperands(Instruction);
+ ++OpNum) {
+ const MCSymbol *Symbol = BC.MIB->getTargetSymbol(Instruction, OpNum);
+ if (!Symbol)
+ continue;
+ if (!ignoreReference(Symbol)) {
+ NeedsPatch = true;
+ break;
+ }
+ }
+ if (!NeedsPatch)
+ continue;
+ }
+
+ // For AArch64, we need to undo relaxation done by the linker if the target
+ // of the instruction is a function that we plan to move.
+ const Relocation *Rel;
+ if (BC.isAArch64() && (Rel = getRelocationAt(Offset))) {
+ // NOP+ADR sequence can originate from either ADRP+ADD or ADRP+LDR.
+ // In either case, we convert it into ADRP+ADD.
+ if (BC.MIB->isADR(Instruction) &&
+ (Rel->Type == ELF::R_AARCH64_ADD_ABS_LO12_NC ||
+ Rel->Type == ELF::R_AARCH64_LD64_GOT_LO12_NC)) {
+ if (!BC.MIB->isNoop(PrevInstruction)) {
+ const MCSymbol *Symbol = BC.MIB->getTargetSymbol(Instruction);
+ BC.errs() << "BOLT-WARNING: cannot undo linker relaxation for "
+ "instruction at 0x"
+ << Twine::utohexstr(AbsoluteInstrAddr) << " referencing "
+ << Symbol->getName() << '\n';
+ if (BinaryFunction *TargetBF = BC.getFunctionForSymbol(Symbol))
+ TargetBF->setIgnored();
+ continue;
+ }
+
+ InstructionListType AdrpAdd =
+ BC.MIB->undoAdrpAddRelaxation(Instruction, BC.Ctx.get());
+ LLVM_DEBUG({
+ dbgs() << "BOLT-DEBUG: linker relaxation undone for instruction "
+ "at 0x"
+ << Twine::utohexstr(AbsoluteInstrAddr) << '\n';
+ });
+ InstructionPatches.push_back({AbsoluteInstrAddr - 4, AdrpAdd[0]});
+ InstructionPatches.push_back({AbsoluteInstrAddr, AdrpAdd[1]});
+ continue;
+ }
+
+ // If ADR was emitted by the compiler/assembler to reference a nearby
+ // local function, we cannot move away that function due to ADR address
+ // span limitation. Hence, we skip the optimization.
+ if (BC.MIB->isADR(Instruction) &&
+ Rel->Type == ELF::R_AARCH64_ADR_PREL_LO21) {
+ BC.errs() << "BOLT-WARNING: unable to convert ADR that references "
+ << Rel->Symbol->getName()
+ << ". Will not optimize the target\n";
+ if (BinaryFunction *TargetBF = BC.getFunctionForSymbol(Rel->Symbol))
+ TargetBF->setIgnored();
+ continue;
+ }
+
+ // In the case of GOT load, ADRP+LDR can also be converted into ADRP+ADD.
+ // When this happens, it's not always possible to properly symbolize ADRP
+ // operand and we might have to adjust the operand based on the next
+ // instruction.
+ if (BC.MIB->isAddXri(Instruction) &&
+ Rel->Type == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
+ if (!BC.MIB->matchAdrpAddPair(PrevInstruction, Instruction)) {
+ BC.errs() << "BOLT-ERROR: cannot find matching ADRP for relaxed LDR "
+ "instruction at 0x"
+ << Twine::utohexstr(AbsoluteInstrAddr) << '\n';
+ exit(1);
+ }
+
+ // Check if ADRP was already patched. If not, add a new patch for it.
----------------
paschalis-mpeis wrote:
Just confirming: is this because the fully-relaxed case was already scheduled for patching above?
https://github.com/llvm/llvm-project/pull/133014
More information about the llvm-commits
mailing list