[llvm] [BOLT][AArch64] Enable Memcpy Inlining for AArch64 in BOLT (PR #154929)
Sjoerd Meijer via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 04:09:25 PDT 2025
================
@@ -2597,6 +2597,115 @@ class AArch64MCPlusBuilder : public MCPlusBuilder {
   getInstructionSize(const MCInst &Inst) const override {
     return 4;
   }
+
+  InstructionListType createInlineMemcpy(bool ReturnEnd) const override {
+    return createInlineMemcpy(ReturnEnd, std::nullopt);
+  }
+
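+  // Match a 64-bit "MOVZ TargetReg, #imm" with a zero shift and return the
+  // immediate, so the memcpy-inlining pass can recover a statically known
+  // copy size.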
+  std::optional<uint64_t>
+  extractMoveImmediate(const MCInst &Inst, MCPhysReg TargetReg) const override {
+    if (Inst.getOpcode() == AArch64::MOVZXi && Inst.getNumOperands() >= 3 &&
+        Inst.getOperand(0).isReg() &&
+        Inst.getOperand(0).getReg() == TargetReg &&
+        Inst.getOperand(1).isImm() && Inst.getOperand(2).isImm() &&
+        Inst.getOperand(2).getImm() == 0)
+      return Inst.getOperand(1).getImm();
+    return std::nullopt;
+  }
+
+  InstructionListType
+  createInlineMemcpy(bool ReturnEnd,
+                     std::optional<uint64_t> KnownSize) const override {
+    InstructionListType Code;
+    // Callers must only request inlining once the copy size is known;
+    // dereferencing an empty optional here would be undefined behavior.
+    assert(KnownSize && "memcpy inlining requires a known size");
+    uint64_t Size = *KnownSize;
+
+    generateSizeSpecificMemcpy(Code, Size);
+
+    // If _memcpy8, adjust X0 to return dest+size instead of dest.
+    if (ReturnEnd)
+      Code.emplace_back(MCInstBuilder(AArch64::ADDXri)
+                            .addReg(AArch64::X0)
+                            .addReg(AArch64::X0)
+                            .addImm(Size)
+                            .addImm(0));
+    return Code;
+  }
+
+  InstructionListType generateSizeSpecificMemcpy(InstructionListType &Code,
+                                                 uint64_t Size) const {
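+    // Emit a load from [X1 + Offset] into Reg and a store of Reg to
+    // [X0 + Offset]. Note the scaled-immediate (ui) forms: Offset is in
+    // units of the access width, not bytes.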
+    auto addLoadStorePair = [&](unsigned LoadOpc, unsigned StoreOpc,
+                                unsigned Reg, unsigned Offset = 0) {
+      Code.emplace_back(MCInstBuilder(LoadOpc)
+                            .addReg(Reg)
+                            .addReg(AArch64::X1)
+                            .addImm(Offset));
+      Code.emplace_back(MCInstBuilder(StoreOpc)
+                            .addReg(Reg)
+                            .addReg(AArch64::X0)
+                            .addImm(Offset));
+    };
+
+    // Generate optimal instruction sequences based on exact size.
+    switch (Size) {
+    case 1:
+      addLoadStorePair(AArch64::LDRBBui, AArch64::STRBBui, AArch64::W3);
+      break;
+    case 2:
+      addLoadStorePair(AArch64::LDRHHui, AArch64::STRHHui, AArch64::W3);
+      break;
+    case 4:
+      addLoadStorePair(AArch64::LDRWui, AArch64::STRWui, AArch64::W3);
+      break;
+    case 8:
+      addLoadStorePair(AArch64::LDRXui, AArch64::STRXui, AArch64::X3);
+      break;
+    case 16:
+      addLoadStorePair(AArch64::LDRQui, AArch64::STRQui, AArch64::Q0);
+      break;
+    case 32:
+      addLoadStorePair(AArch64::LDRQui, AArch64::STRQui, AArch64::Q0, 0);
+      addLoadStorePair(AArch64::LDRQui, AArch64::STRQui, AArch64::Q1, 1);
+      break;
+    default:
+      if (Size <= 64) {
+        // For sizes up to 64 bytes, greedily use the largest possible loads.
+        uint64_t Remaining = Size;
+        uint64_t Offset = 0;
+
+        while (Remaining >= 16) {
+          addLoadStorePair(AArch64::LDRQui, AArch64::STRQui, AArch64::Q0,
----------------
sjoerdmeijer wrote:
There is still quite a bit of code repetition going on here. Can you see if you can clean this up too?
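Something along these lines might work (untested sketch, reusing the addLoadStorePair helper from this patch; it folds the power-of-two cases and the greedy <= 64-byte fallback into one loop, but drops the Q0/Q1 alternation the 32-byte case currently has, which may be worth keeping so the two pairs stay independent):

```cpp
    // Chunk table, largest first; TmpReg is the scratch register used for
    // each load/store pair (register choices kept from the current patch).
    struct Chunk {
      uint64_t Bytes;
      unsigned LoadOpc, StoreOpc, TmpReg;
    };
    static const Chunk Chunks[] = {
        {16, AArch64::LDRQui, AArch64::STRQui, AArch64::Q0},
        {8, AArch64::LDRXui, AArch64::STRXui, AArch64::X3},
        {4, AArch64::LDRWui, AArch64::STRWui, AArch64::W3},
        {2, AArch64::LDRHHui, AArch64::STRHHui, AArch64::W3},
        {1, AArch64::LDRBBui, AArch64::STRBBui, AArch64::W3},
    };

    uint64_t Offset = 0;
    for (const Chunk &C : Chunks) {
      // Greedily emit as many pairs of this width as still fit.
      while (Size - Offset >= C.Bytes) {
        // The ui forms take the immediate scaled by the access width.
        addLoadStorePair(C.LoadOpc, C.StoreOpc, C.TmpReg, Offset / C.Bytes);
        Offset += C.Bytes;
      }
    }
```

Largest-first greediness keeps Offset a multiple of each smaller chunk width, so the scaled immediates stay exact. Whether that is clearer is your call; it is just one way to fold the cases together.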
https://github.com/llvm/llvm-project/pull/154929
More information about the llvm-commits mailing list