[llvm] 85c9c16 - [RISCV] Support load clustering in the MachineScheduler (off by default) (#73754)

via llvm-commits <llvm-commits at lists.llvm.org>
Wed Nov 29 02:02:20 PST 2023


Author: Alex Bradbury
Date: 2023-11-29T10:01:55Z
New Revision: 85c9c16895082dfaee3e8440440c83ba9b436da0

URL: https://github.com/llvm/llvm-project/commit/85c9c16895082dfaee3e8440440c83ba9b436da0
DIFF: https://github.com/llvm/llvm-project/commit/85c9c16895082dfaee3e8440440c83ba9b436da0.diff

LOG: [RISCV] Support load clustering in the MachineScheduler (off by default) (#73754)

This adds minimal support for load clustering, but disables it by
default. The intent is to iterate on the precise heuristic and on the
question of turning it on by default in a separate PR. Although
previous discussion indicates hope that the MachineScheduler would
replace most uses of the SelectionDAG scheduler, most targets don't
seem to be using MachineScheduler load clustering right now: PPC and
AArch64 appear to use it only to help with paired load/store
formation, and although AMDGPU uses it for general clustering, it also
implements ShouldScheduleLoadsNear for the SelectionDAG scheduler's
clustering.
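
As a minimal usage sketch (assuming an asserts-enabled build of llc, as the
new test's REQUIRES: asserts implies, and a placeholder input file input.ll),
the behaviour can be exercised via the hidden flag added below, mirroring the
RUN lines of the new test:

    llc -mtriple=riscv64 -riscv-misched-load-clustering -verify-misched \
        -debug-only=machine-scheduler -o - input.ll 2>&1

With the flag set, the -debug-only=machine-scheduler dump shows the LW loads
reordered so that they are clustered (the LDCLUSTER checks in the added test);
without it, the NOCLUSTER order is produced.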

Added: 
    llvm/test/CodeGen/RISCV/misched-load-clustering.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.h
    llvm/lib/Target/RISCV/RISCVTargetMachine.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 6c5712dc795bc75..2918e5654db4f9f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -19,6 +19,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/ValueTracking.h"
 #include "llvm/CodeGen/LiveIntervals.h"
 #include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/MachineCombinerPattern.h"
@@ -2231,6 +2232,60 @@ bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
   return true;
 }
 
+// TODO: This was copied from SIInstrInfo. Could it be lifted to a common
+// helper?
+static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
+                                  ArrayRef<const MachineOperand *> BaseOps1,
+                                  const MachineInstr &MI2,
+                                  ArrayRef<const MachineOperand *> BaseOps2) {
+  // Only examine the first "base" operand of each instruction, on the
+  // assumption that it represents the real base address of the memory access.
+  // Other operands are typically offsets or indices from this base address.
+  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
+    return true;
+
+  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
+    return false;
+
+  auto MO1 = *MI1.memoperands_begin();
+  auto MO2 = *MI2.memoperands_begin();
+  if (MO1->getAddrSpace() != MO2->getAddrSpace())
+    return false;
+
+  auto Base1 = MO1->getValue();
+  auto Base2 = MO2->getValue();
+  if (!Base1 || !Base2)
+    return false;
+  Base1 = getUnderlyingObject(Base1);
+  Base2 = getUnderlyingObject(Base2);
+
+  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
+    return false;
+
+  return Base1 == Base2;
+}
+
+bool RISCVInstrInfo::shouldClusterMemOps(
+    ArrayRef<const MachineOperand *> BaseOps1,
+    ArrayRef<const MachineOperand *> BaseOps2, unsigned ClusterSize,
+    unsigned NumBytes) const {
+  // If the mem ops (to be clustered) do not have the same base ptr, then they
+  // should not be clustered
+  if (!BaseOps1.empty() && !BaseOps2.empty()) {
+    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
+    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
+    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
+      return false;
+  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
+    // If only one base op is empty, they do not have the same base ptr
+    return false;
+  }
+
+  // TODO: Use a more carefully chosen heuristic, e.g. only cluster if offsets
+  // indicate they likely share a cache line.
+  return ClusterSize <= 4;
+}
+
 // Set BaseReg (the base register operand), Offset (the byte offset being
 // accessed) and the access Width of the passed instruction that reads/writes
 // memory. Returns false if the instruction does not read/write memory or the

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 8f860077c303170..0954286a419bdd5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -157,6 +157,11 @@ class RISCVInstrInfo : public RISCVGenInstrInfo {
       int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
       const TargetRegisterInfo *TRI) const override;
 
+  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+                           ArrayRef<const MachineOperand *> BaseOps2,
+                           unsigned ClusterSize,
+                           unsigned NumBytes) const override;
+
   bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                     const MachineOperand *&BaseOp,
                                     int64_t &Offset, unsigned &Width,

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 524c1a5ca50c5d9..b6c194f03f54209 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -95,6 +95,11 @@ static cl::opt<bool>
                         cl::desc("Enable Split RegisterAlloc for RVV"),
                         cl::init(false));
 
+static cl::opt<bool> EnableMISchedLoadClustering(
+    "riscv-misched-load-clustering", cl::Hidden,
+    cl::desc("Enable load clustering in the machine scheduler"),
+    cl::init(false));
+
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
   RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
@@ -345,12 +350,16 @@ class RISCVPassConfig : public TargetPassConfig {
   ScheduleDAGInstrs *
   createMachineScheduler(MachineSchedContext *C) const override {
     const RISCVSubtarget &ST = C->MF->getSubtarget<RISCVSubtarget>();
+    ScheduleDAGMILive *DAG = nullptr;
+    if (EnableMISchedLoadClustering) {
+      DAG = createGenericSchedLive(C);
+      DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
+    }
     if (ST.hasMacroFusion()) {
-      ScheduleDAGMILive *DAG = createGenericSchedLive(C);
+      DAG = DAG ? DAG : createGenericSchedLive(C);
       DAG->addMutation(createRISCVMacroFusionDAGMutation());
-      return DAG;
     }
-    return nullptr;
+    return DAG;
   }
 
   ScheduleDAGInstrs *

diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
new file mode 100644
index 000000000000000..4eb969a357a9eea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
@@ -0,0 +1,43 @@
+; REQUIRES: asserts
+; RUN: llc -mtriple=riscv32 -verify-misched -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN:   | FileCheck -check-prefix=NOCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN:   | FileCheck -check-prefix=NOCLUSTER %s
+; RUN: llc -mtriple=riscv32 -riscv-misched-load-clustering -verify-misched \
+; RUN:     -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN:   | FileCheck -check-prefix=LDCLUSTER %s
+; RUN: llc -mtriple=riscv64 -riscv-misched-load-clustering -verify-misched \
+; RUN:     -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN:   | FileCheck -check-prefix=LDCLUSTER %s
+
+
+define i32 @load_clustering_1(ptr nocapture %p) {
+; NOCLUSTER: ********** MI Scheduling **********
+; NOCLUSTER-LABEL: load_clustering_1:%bb.0
+; NOCLUSTER: *** Final schedule for %bb.0 ***
+; NOCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; NOCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+;
+; LDCLUSTER: ********** MI Scheduling **********
+; LDCLUSTER-LABEL: load_clustering_1:%bb.0
+; LDCLUSTER: *** Final schedule for %bb.0 ***
+; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; LDCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+entry:
+  %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
+  %val0 = load i32, i32* %arrayidx0
+  %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2
+  %val1 = load i32, i32* %arrayidx1
+  %tmp0 = add i32 %val0, %val1
+  %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1
+  %val2 = load i32, i32* %arrayidx2
+  %tmp1 = add i32 %tmp0, %val2
+  %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4
+  %val3 = load i32, i32* %arrayidx3
+  %tmp2 = add i32 %tmp1, %val3
+  ret i32 %tmp2
+}
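
To run the added test locally, something like the following should work from
the root of an llvm-project checkout with an asserts-enabled build (the
"build/" directory name is an assumption):

    ./build/bin/llvm-lit -v llvm/test/CodeGen/RISCV/misched-load-clustering.ll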


        

