[llvm] [NFC][MachineLoopInfo] Consider loads in `isLoopInvariant`. (PR #95632)
Mikhail Gudim via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 2 19:50:05 PDT 2024
https://github.com/mgudim updated https://github.com/llvm/llvm-project/pull/95632
>From b8eb34114814229f576ba4fd0ee338dcb4ff80d0 Mon Sep 17 00:00:00 2001
From: Mikhail Gudim <mgudim at gmail.com>
Date: Sat, 15 Jun 2024 00:20:28 -0400
Subject: [PATCH] [NFC][MachineLoopInfo] Consider loads in `isLoopInvariant`.
Currently `isLoopInvariant` does not consider loads: if it is called on a
load instruction it may return an incorrect result, because some store in
the loop may alias the load. MachineLICM has separate logic to handle
loads.

In this patch we move that logic into `isLoopInvariant`, which is a more
appropriate place for it. Callers that only need register invariance (for
example EarlyIfConversion) keep the old behavior via the new
`IgnoreAliasing` parameter.
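
For illustration, a minimal sketch of how a caller might query the updated
interface. The helper names below are hypothetical and not part of this
patch; only the `isLoopInvariant` signature and its `IgnoreAliasing`
parameter are introduced here.

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineLoopInfo.h"
  using namespace llvm;

  // Conservative query: with this change a load is reported loop-invariant
  // only if it reads from constant memory (GOT or constant pool), because
  // some store in the loop may otherwise alias it.
  static bool isInvariantSketch(const MachineLoop &L, MachineInstr &MI) {
    return L.isLoopInvariant(MI);
  }

  // Register-only query: ignore possible memory aliasing, as the updated
  // EarlyIfConversion call sites do.
  static bool isRegInvariantSketch(const MachineLoop &L, MachineInstr &MI) {
    return L.isLoopInvariant(MI, /*ExcludeReg=*/0, /*IgnoreAliasing=*/true);
  }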
---
llvm/include/llvm/CodeGen/MachineLoopInfo.h | 3 +-
llvm/lib/CodeGen/EarlyIfConversion.cpp | 6 +-
llvm/lib/CodeGen/MachineLICM.cpp | 46 ++++---
llvm/lib/CodeGen/MachineLoopInfo.cpp | 28 ++++-
.../RISCV/machinelicm-invariant-load.mir | 119 ++++++++++++++++++
5 files changed, 177 insertions(+), 25 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/machinelicm-invariant-load.mir
diff --git a/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/llvm/include/llvm/CodeGen/MachineLoopInfo.h
index 967c4a70ca469..b5d632cae89ec 100644
--- a/llvm/include/llvm/CodeGen/MachineLoopInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -82,7 +82,8 @@ class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
/// ExcludeReg can be used to exclude the given register from the check
/// i.e. when we're considering hoisting it's definition but not hoisted it
/// yet
- bool isLoopInvariant(MachineInstr &I, const Register ExcludeReg = 0) const;
+ bool isLoopInvariant(MachineInstr &I, const Register ExcludeReg = 0,
+ bool IgnoreAliasing = false) const;
void dump() const;
diff --git a/llvm/lib/CodeGen/EarlyIfConversion.cpp b/llvm/lib/CodeGen/EarlyIfConversion.cpp
index 2a7bee1618deb..5b1fced840e8a 100644
--- a/llvm/lib/CodeGen/EarlyIfConversion.cpp
+++ b/llvm/lib/CodeGen/EarlyIfConversion.cpp
@@ -887,7 +887,8 @@ bool EarlyIfConverter::shouldConvertIf() {
return false;
MachineInstr *Def = MRI->getVRegDef(Reg);
- return CurrentLoop->isLoopInvariant(*Def) ||
+ return CurrentLoop->isLoopInvariant(*Def, /*ExcludeReg=*/0,
+ /*IgnoreAliasing=*/true) ||
all_of(Def->operands(), [&](MachineOperand &Op) {
if (Op.isImm())
return true;
@@ -898,7 +899,8 @@ bool EarlyIfConverter::shouldConvertIf() {
return false;
MachineInstr *Def = MRI->getVRegDef(Reg);
- return CurrentLoop->isLoopInvariant(*Def);
+ return CurrentLoop->isLoopInvariant(
+ *Def, /*ExcludeReg=*/0, /*IgnoreAliasing=*/true);
});
}))
return false;
diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index 86eb259c09015..f0fbe5e9d80c1 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -913,24 +913,6 @@ MachineLICMBase::calcRegisterCost(const MachineInstr *MI, bool ConsiderSeen,
return Cost;
}
-/// Return true if this machine instruction loads from global offset table or
-/// constant pool.
-static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
- assert(MI.mayLoad() && "Expected MI that loads!");
-
- // If we lost memory operands, conservatively assume that the instruction
- // reads from everything..
- if (MI.memoperands_empty())
- return true;
-
- for (MachineMemOperand *MemOp : MI.memoperands())
- if (const PseudoSourceValue *PSV = MemOp->getPseudoValue())
- if (PSV->isGOT() || PSV->isConstantPool())
- return true;
-
- return false;
-}
-
// This function iterates through all the operands of the input store MI and
// checks that each register operand statisfies isCallerPreservedPhysReg.
// This means, the value being stored and the address where it is being stored
@@ -1001,6 +983,24 @@ static bool isCopyFeedingInvariantStore(const MachineInstr &MI,
return false;
}
+/// Return true if this machine instruction loads only from global offset table
+/// or constant pool.
+static bool onlyLoadsFromConstantMemory(MachineInstr &MI) {
+ assert(MI.mayLoad() && "Expected MI that loads!");
+
+ // If we lost memory operands, conservatively assume that the instruction
+ // may read from anything.
+ if (MI.memoperands_empty())
+ return false;
+
+ // A memory operand without a pseudo source value may reference arbitrary
+ // memory, so the load cannot be proven to read only constant memory.
+ for (MachineMemOperand *MemOp : MI.memoperands()) {
+ const PseudoSourceValue *PSV = MemOp->getPseudoValue();
+ if (!PSV || (!PSV->isGOT() && !PSV->isConstantPool()))
+ return false;
+ }
+
+ return true;
+}
+
/// Returns true if the instruction may be a suitable candidate for LICM.
/// e.g. If the instruction is a call, then it's obviously not safe to hoist it.
bool MachineLICMBase::IsLICMCandidate(MachineInstr &I, MachineLoop *CurLoop) {
@@ -1018,8 +1018,14 @@ bool MachineLICMBase::IsLICMCandidate(MachineInstr &I, MachineLoop *CurLoop) {
// Loads from constant memory are safe to speculate, for example indexed load
// from a jump table.
// Stores and side effects are already checked by isSafeToMove.
- if (I.mayLoad() && !mayLoadFromGOTOrConstantPool(I) &&
- !IsGuaranteedToExecute(I.getParent(), CurLoop)) {
+ if (I.mayLoad() && !IsGuaranteedToExecute(I.getParent(), CurLoop) &&
+ !onlyLoadsFromConstantMemory(I)) {
LLVM_DEBUG(dbgs() << "LICM: Load not guaranteed to execute.\n");
return false;
}
diff --git a/llvm/lib/CodeGen/MachineLoopInfo.cpp b/llvm/lib/CodeGen/MachineLoopInfo.cpp
index 1019c53e57c6f..1ae05a394c8d5 100644
--- a/llvm/lib/CodeGen/MachineLoopInfo.cpp
+++ b/llvm/lib/CodeGen/MachineLoopInfo.cpp
@@ -215,14 +215,38 @@ bool MachineLoop::isLoopInvariantImplicitPhysReg(Register Reg) const {
[this](const MachineInstr &MI) { return this->contains(&MI); });
}
-bool MachineLoop::isLoopInvariant(MachineInstr &I,
- const Register ExcludeReg) const {
+/// Return true if this machine instruction loads only from global offset table
+/// or constant pool.
+static bool onlyLoadsFromConstantMemory(MachineInstr &MI) {
+ assert(MI.mayLoad() && "Expected MI that loads!");
+
+ // If we lost memory operands, conservatively assume that the instruction
+ // may read from anything.
+ if (MI.memoperands_empty())
+ return false;
+
+ // A memory operand without a pseudo source value may reference arbitrary
+ // memory, so the load cannot be proven to read only constant memory.
+ for (MachineMemOperand *MemOp : MI.memoperands()) {
+ const PseudoSourceValue *PSV = MemOp->getPseudoValue();
+ if (!PSV || (!PSV->isGOT() && !PSV->isConstantPool()))
+ return false;
+ }
+
+ return true;
+}
+
+bool MachineLoop::isLoopInvariant(MachineInstr &I, const Register ExcludeReg,
+ bool IgnoreAliasing) const {
MachineFunction *MF = I.getParent()->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
const TargetSubtargetInfo &ST = MF->getSubtarget();
const TargetRegisterInfo *TRI = ST.getRegisterInfo();
const TargetInstrInfo *TII = ST.getInstrInfo();
+ // TODO: If the address of a load is loop-invariant and doesn't alias any
+ // store in the loop then it is loop-invariant. For now only handle constant
+ // loads.
+ if (I.mayLoad() && !onlyLoadsFromConstantMemory(I) && !IgnoreAliasing)
+ return false;
+
// The instruction is loop invariant if all of its operands are.
for (const MachineOperand &MO : I.operands()) {
if (!MO.isReg())
diff --git a/llvm/test/CodeGen/RISCV/machinelicm-invariant-load.mir b/llvm/test/CodeGen/RISCV/machinelicm-invariant-load.mir
new file mode 100644
index 0000000000000..e54ae162cca92
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/machinelicm-invariant-load.mir
@@ -0,0 +1,119 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=riscv64 -x mir -run-pass=machinelicm -simplify-mir -verify-machineinstrs < %s | FileCheck %s
+
+---
+name: invariant_load
+tracksRegLiveness: true
+constants:
+ - id: 0
+ value: i64 -1085102592571150096
+ alignment: 8
+ isTargetSpecific: false
+body: |
+ ; CHECK-LABEL: name: invariant_load
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %base_addr:gpr = COPY $x10
+ ; CHECK-NEXT: %n:gpr = COPY $x11
+ ; CHECK-NEXT: %i_0:gpr = COPY $x0
+ ; CHECK-NEXT: %const_pool_addr_upper:gpr = LUI target-flags(riscv-hi) %const.0
+ ; CHECK-NEXT: %invariant_load:gpr = LD %const_pool_addr_upper, target-flags(riscv-lo) %const.0 :: (load (s64) from constant-pool)
+ ; CHECK-NEXT: PseudoBR %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: %offset:gpr = PHI %i_inc, %bb.1, %i_0, %bb.0
+ ; CHECK-NEXT: %addr:gpr = ADD %base_addr, %offset
+ ; CHECK-NEXT: %x:gpr = LW %addr, 0
+ ; CHECK-NEXT: %val:gpr = ADD %x, %invariant_load
+ ; CHECK-NEXT: SW %val, %addr, 0
+ ; CHECK-NEXT: %i_inc:gpr = ADDI %offset, 4
+ ; CHECK-NEXT: BLT %i_inc, %n, %bb.1
+ ; CHECK-NEXT: PseudoBR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: PseudoRET
+ bb.0:
+ successors: %bb.1
+ liveins: $x10, $x11
+
+ %base_addr:gpr = COPY $x10
+ %n:gpr = COPY $x11
+ %i_0:gpr = COPY $x0
+ PseudoBR %bb.1
+
+ bb.1:
+ %offset:gpr = PHI %i_inc, %bb.1, %i_0, %bb.0
+ %addr:gpr = ADD %base_addr, %offset
+ %x:gpr = LW %addr, 0
+
+ %const_pool_addr_upper:gpr = LUI target-flags(riscv-hi) %const.0
+ %invariant_load:gpr = LD %const_pool_addr_upper, target-flags(riscv-lo) %const.0 :: (load (s64) from constant-pool)
+ %val:gpr = ADD %x, %invariant_load
+ SW %val, %addr, 0
+ %i_inc:gpr = ADDI %offset, 4
+ BLT %i_inc, %n, %bb.1
+ PseudoBR %bb.2
+
+ bb.2:
+ PseudoRET
+...
+
+# Do not hoist the load out of the loop: %other_addr may alias %addr.
+---
+name: not_invariant_load
+tracksRegLiveness: true
+constants:
+ - id: 0
+ value: i64 -1085102592571150096
+ alignment: 8
+ isTargetSpecific: false
+body: |
+ ; CHECK-LABEL: name: not_invariant_load
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %base_addr:gpr = COPY $x10
+ ; CHECK-NEXT: %n:gpr = COPY $x11
+ ; CHECK-NEXT: %other_addr:gpr = COPY $x12
+ ; CHECK-NEXT: %i_0:gpr = COPY $x0
+ ; CHECK-NEXT: PseudoBR %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: %offset:gpr = PHI %i_inc, %bb.1, %i_0, %bb.0
+ ; CHECK-NEXT: %addr:gpr = ADD %base_addr, %offset
+ ; CHECK-NEXT: %x:gpr = LW %addr, 0
+ ; CHECK-NEXT: %load_may_alias:gpr = LD %other_addr, 0
+ ; CHECK-NEXT: %val:gpr = ADD %x, %load_may_alias
+ ; CHECK-NEXT: SW %val, %addr, 0
+ ; CHECK-NEXT: %i_inc:gpr = ADDI %offset, 4
+ ; CHECK-NEXT: BLT %i_inc, %n, %bb.1
+ ; CHECK-NEXT: PseudoBR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: PseudoRET
+ bb.0:
+ successors: %bb.1
+ liveins: $x10, $x11, $x12
+
+ %base_addr:gpr = COPY $x10
+ %n:gpr = COPY $x11
+ %other_addr:gpr = COPY $x12
+ %i_0:gpr = COPY $x0
+ PseudoBR %bb.1
+
+ bb.1:
+ %offset:gpr = PHI %i_inc, %bb.1, %i_0, %bb.0
+ %addr:gpr = ADD %base_addr, %offset
+ %x:gpr = LW %addr, 0
+
+ %load_may_alias:gpr = LD %other_addr, 0
+ %val:gpr = ADD %x, %load_may_alias
+ SW %val, %addr, 0
+ %i_inc:gpr = ADDI %offset, 4
+ BLT %i_inc, %n, %bb.1
+ PseudoBR %bb.2
+
+ bb.2:
+ PseudoRET
+...