[llvm] [llvm][RISCV] Implement Zilsd load/store pair optimization (PR #158640)
Sam Elliott via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 17 20:08:38 PST 2025
================
@@ -0,0 +1,531 @@
+//===-- RISCVZilsdOptimizer.cpp - RISC-V Zilsd Load/Store Optimizer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that performs load/store optimizations for the
+// RISC-V Zilsd extension. It combines pairs of 32-bit load/store instructions
+// into single 64-bit LD/SD instructions when possible.
+//
+// The pass runs in two phases:
+// 1. Pre-allocation: Reschedules loads/stores to bring consecutive memory
+// accesses closer together and forms LD/SD pairs with register hints.
+// 2. Post-allocation: Fixes invalid LD/SD instructions if register allocation
+// didn't provide suitable consecutive registers.
+//
+// Note: the second phase is integrated into the RISCVLoadStoreOptimizer pass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVInstrInfo.h"
+#include "RISCVRegisterInfo.h"
+#include "RISCVSubtarget.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include <algorithm>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-zilsd-opt"
+
+STATISTIC(NumLDFormed, "Number of LD instructions formed");
+STATISTIC(NumSDFormed, "Number of SD instructions formed");
+
+static cl::opt<bool>
+ DisableZilsdOpt("disable-riscv-zilsd-opt", cl::Hidden, cl::init(false),
+ cl::desc("Disable Zilsd load/store optimization"));
+
+static cl::opt<unsigned> MaxRescheduleDistance(
+ "riscv-zilsd-max-reschedule-distance", cl::Hidden, cl::init(10),
+ cl::desc("Maximum distance for rescheduling load/store instructions"));
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// Pre-allocation Zilsd optimization pass
+//===----------------------------------------------------------------------===//
+// Pre-register-allocation pass that reschedules 32-bit LW/SW instructions
+// within a basic block so that consecutive accesses can later be paired
+// into 64-bit Zilsd LD/SD instructions (see file header for the two-phase
+// design; the post-RA fixup lives in RISCVLoadStoreOptimizer).
+class RISCVPreAllocZilsdOpt : public MachineFunctionPass {
+public:
+  static char ID;
+
+  RISCVPreAllocZilsdOpt() : MachineFunctionPass(ID) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  StringRef getPassName() const override {
+    return "RISC-V pre-allocation Zilsd load/store optimization";
+  }
+
+  // Requires alias analysis (to check move safety) and the machine
+  // dominator tree; rescheduling never changes the CFG.
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<MachineDominatorTreeWrapperPass>();
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+  // Discriminates what kind of value sits in a LW/SW offset operand
+  // (immediate, or a symbolic lo-part produced by MergeBaseOffset);
+  // Unknown is returned for anything getMemoryOpOffset cannot classify.
+  enum class MemoryOffsetKind {
+    Imm = 0,
+    Global = 1,
+    CPI = 2,
+    BlockAddr = 3,
+    Unknown = 4,
+  };
+  // (offset kind, byte offset) as computed by getMemoryOpOffset.
+  using MemOffset = std::pair<MemoryOffsetKind, int>;
+  // (base register, offset kind) key passed to rescheduleOps to identify a
+  // group of candidate memory operations sharing the same base.
+  using BaseRegInfo = std::pair<unsigned, MemoryOffsetKind>;
+
+private:
+  bool isMemoryOp(const MachineInstr &MI);
+  bool rescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
+  bool canFormLdSdPair(MachineInstr *MI0, MachineInstr *MI1);
+  bool rescheduleOps(MachineBasicBlock *MBB,
+                     SmallVectorImpl<MachineInstr *> &MIs, BaseRegInfo Base,
+                     bool IsLoad,
+                     DenseMap<MachineInstr *, unsigned> &MI2LocMap);
+  bool isSafeToMove(MachineInstr *MI, MachineInstr *Target, bool MoveForward);
+  MemOffset getMemoryOpOffset(const MachineInstr &MI);
+
+  // Per-function state cached by runOnMachineFunction.
+  const RISCVSubtarget *STI;
+  const RISCVInstrInfo *TII;
+  const RISCVRegisterInfo *TRI;
+  MachineRegisterInfo *MRI;
+  AliasAnalysis *AA;
+  MachineDominatorTree *DT;
+  // Minimum alignment required to form an LD/SD pair: 1 when unaligned
+  // scalar memory is allowed, 4 with the 4-byte-align tune feature,
+  // otherwise 8 (set in runOnMachineFunction).
+  Align RequiredAlign;
+};
+
+} // end anonymous namespace
+
+// Pass registration: exposes the pass to the registry under the
+// "riscv-prera-zilsd-opt" name and records its analysis dependencies
+// (alias analysis and the machine dominator tree).
+char RISCVPreAllocZilsdOpt::ID = 0;
+
+INITIALIZE_PASS_BEGIN(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
+                      "RISC-V pre-allocation Zilsd optimization", false, false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_END(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
+                    "RISC-V pre-allocation Zilsd optimization", false, false)
+
+//===----------------------------------------------------------------------===//
+// Pre-allocation pass implementation
+//===----------------------------------------------------------------------===//
+
+// Entry point. Bails out unless the target is RV32 with the Zilsd
+// extension, caches subtarget/register/analysis handles, computes the
+// pairing alignment requirement, then attempts rescheduling in every
+// basic block. Returns true iff any block was modified.
+bool RISCVPreAllocZilsdOpt::runOnMachineFunction(MachineFunction &MF) {
+
+  // Respect the command-line kill switch and optnone/opt-bisect skips.
+  if (DisableZilsdOpt || skipFunction(MF.getFunction()))
+    return false;
+
+  STI = &MF.getSubtarget<RISCVSubtarget>();
+
+  // Only run on RV32 with Zilsd extension
+  if (STI->is64Bit() || !STI->hasStdExtZilsd())
+    return false;
+
+  TII = STI->getInstrInfo();
+  TRI = STI->getRegisterInfo();
+  MRI = &MF.getRegInfo();
+  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+  DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
+
+  // Check alignment: default is 8-byte, but allow 4-byte with tune feature
+  // If unaligned scalar memory is enabled, allow any alignment
+  RequiredAlign = STI->enableUnalignedScalarMem() ? Align(1)
+                  : STI->allowZilsd4ByteAlign() ? Align(4)
+                                                : Align(8);
+  bool Modified = false;
+  for (auto &MBB : MF) {
+    Modified |= rescheduleLoadStoreInstrs(&MBB);
+  }
+
+  return Modified;
+}
+
+// Classifies the offset operand of a LW/SW and returns a
+// (MemoryOffsetKind, offset) pair. Plain immediates yield Imm; symbolic
+// lo-part operands (tagged MO_LO by the MergeBaseOffset pass) yield
+// Global/CPI/BlockAddr with the operand's addend. Any other opcode or
+// operand shape yields (Unknown, 0).
+RISCVPreAllocZilsdOpt::MemOffset
+RISCVPreAllocZilsdOpt::getMemoryOpOffset(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
+  case RISCV::LW:
+  case RISCV::SW: {
+    // For LW/SW, the offset is in operand 2
+    const MachineOperand &OffsetOp = MI.getOperand(2);
+
+    // Handle immediate offset
+    if (OffsetOp.isImm())
+      return std::make_pair(MemoryOffsetKind::Imm, OffsetOp.getImm());
+
+    // Handle symbolic operands with MO_LO flag (from MergeBaseOffset)
+    if (OffsetOp.getTargetFlags() & RISCVII::MO_LO) {
+      if (OffsetOp.isGlobal())
+        return std::make_pair(MemoryOffsetKind::Global, OffsetOp.getOffset());
+      if (OffsetOp.isCPI())
+        return std::make_pair(MemoryOffsetKind::CPI, OffsetOp.getOffset());
+      if (OffsetOp.isBlockAddress())
+        return std::make_pair(MemoryOffsetKind::BlockAddr,
+                              OffsetOp.getOffset());
+    }
+
+    break;
+  }
+  default:
+    break;
+  }
+
+  // Fall-through: unsupported opcode or unclassifiable offset operand.
+  return std::make_pair(MemoryOffsetKind::Unknown, 0);
+}
+
+bool RISCVPreAllocZilsdOpt::canFormLdSdPair(MachineInstr *MI0,
+ MachineInstr *MI1) {
+ if (!MI0->hasOneMemOperand() || !MI1->hasOneMemOperand())
+ return false;
+
+ // Get offsets and check they are consecutive
+ int Offset0 = getMemoryOpOffset(*MI0).second;
+ int Offset1 = getMemoryOpOffset(*MI1).second;
+
+ // Offsets must be 4 bytes apart
+ if (Offset1 - Offset0 != 4)
+ return false;
+
+  // We need to guarantee that the alignment of (base + offset) is legal,
+  // e.g. if the required alignment is 8:
+  // Valid: global(align 8) + offset(0)
+  // Valid: global(align 4) + offset(4)
+  // Invalid: global(align 8) + offset(4)
+ const MachineMemOperand *MMO = *MI0->memoperands_begin();
+ unsigned Alignment = MMO->getBaseAlign().value() + Offset0;
+ if (Alignment < RequiredAlign.value() ||
+ (Alignment % RequiredAlign.value()) != 0)
----------------
lenary wrote:
If we know `MI0` is the instruction with the lower offset (you sorted them by offset, so I believe this to be true), then we can use `MachineMemOperand::getAlign()` on the memory operand of the lower instruction and compare the two `Align`s directly, because the lower of the two offsets needs to be the correctly aligned one.
```suggestion
  if (MMO->getAlign() < RequiredAlign)
```
Sam
https://github.com/llvm/llvm-project/pull/158640
More information about the llvm-commits
mailing list