[llvm] [RegAlloc] Account for non-rematerializable uses when applying weight discount (PR #159180)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 16 14:55:51 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Luke Lau (lukel97)
Changes:
This aims to fix the issue that caused https://reviews.llvm.org/D106408 to be reverted.
CalcSpillWeights will reduce the weight of an interval by half if it's considered rematerializable, so it will be evicted before others.
It does this by checking TII.isTriviallyReMaterializable. However, rematerialization may still fail if any of the defining MI's uses aren't available at the locations where it needs to be rematerialized. LiveRangeEdit::canRematerializeAt calls allUsesAvailableAt to check this, but CalcSpillWeights doesn't, so the two diverge.
This patch fixes that by also checking allUsesAvailableAt in CalcSpillWeights. There may be a better place than LiveIntervals to share the function.
In practice this has zero change on AArch64/X86-64/RISC-V as measured on llvm-test-suite, but it prevents weights from being perturbed by an upcoming patch that enables more rematerialization by re-attempting https://reviews.llvm.org/D106408.
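For readers who don't want to dig through the diff, the sketch below restates the check added to VirtRegAuxInfo::isRematerializable as a standalone helper. The helper name rematDiscountIsSafe is made up for illustration; the LiveIntervals/MachineRegisterInfo calls are the ones used in the patch.

```cpp
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"

using namespace llvm;

// DefMI defines Reg and has already passed TII.isTriviallyReMaterializable.
// Only grant the spill-weight discount if, for every use of Reg that is
// reached by this defining value (VNI), DefMI's own register inputs still
// hold the same values at that use point, i.e. rematerialization there
// would actually succeed.
static bool rematDiscountIsSafe(Register Reg, const MachineInstr &DefMI,
                                const VNInfo *VNI, const LiveInterval &LI,
                                const LiveIntervals &LIS,
                                const MachineRegisterInfo &MRI) {
  for (const MachineInstr &UseMI : MRI.use_instructions(Reg)) {
    SlotIndex UseIdx = LIS.getInstructionIndex(UseMI);
    if (LI.getVNInfoAt(UseIdx) != VNI)
      continue; // This use is reached by a different def of Reg.
    if (!LIS.allUsesAvailableAt(DefMI, UseIdx))
      return false; // Some input of DefMI has a different value here.
  }
  return true;
}
```

With this logic inlined in CalcSpillWeights, the weight discount and LiveRangeEdit::canRematerializeAt now agree on what counts as rematerializable.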
---
Patch is 154.09 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/159180.diff
17 Files Affected:
- (modified) llvm/include/llvm/CodeGen/LiveIntervals.h (+4)
- (modified) llvm/include/llvm/CodeGen/LiveRangeEdit.h (-5)
- (modified) llvm/lib/CodeGen/CalcSpillWeights.cpp (+11)
- (modified) llvm/lib/CodeGen/LiveIntervals.cpp (+52)
- (modified) llvm/lib/CodeGen/LiveRangeEdit.cpp (+2-59)
- (modified) llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll (+42-42)
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll (+43-43)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll (+96-96)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll (+26-26)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/remat.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll (+18-38)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll (+485-424)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll (+62-38)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll (+112-156)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll (+6-5)
- (modified) llvm/test/CodeGen/Thumb2/LowOverheadLoops/cond-vector-reduce-mve-codegen.ll (+10-5)
``````````diff
diff --git a/llvm/include/llvm/CodeGen/LiveIntervals.h b/llvm/include/llvm/CodeGen/LiveIntervals.h
index 1050b3daa0f57..6678ebaac333b 100644
--- a/llvm/include/llvm/CodeGen/LiveIntervals.h
+++ b/llvm/include/llvm/CodeGen/LiveIntervals.h
@@ -466,6 +466,10 @@ class LiveIntervals {
/// have any segments or value numbers.
LLVM_ABI void constructMainRangeFromSubranges(LiveInterval &LI);
+ /// \returns true if all registers used by \p MI at its defining slot index
+ /// are also available with the same value at \p UseIdx.
+ bool allUsesAvailableAt(const MachineInstr &MI, SlotIndex UseIdx) const;
+
private:
/// Compute live intervals for all virtual registers.
void computeVirtRegs();
diff --git a/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/llvm/include/llvm/CodeGen/LiveRangeEdit.h
index 3d5df194c71c9..adcca23c24fb3 100644
--- a/llvm/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/llvm/include/llvm/CodeGen/LiveRangeEdit.h
@@ -193,11 +193,6 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
explicit Remat(const VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
};
- /// allUsesAvailableAt - Return true if all registers used by OrigMI at
- /// OrigIdx are also available with the same value at UseIdx.
- bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
- SlotIndex UseIdx) const;
-
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
/// UseIdx. It is assumed that parent_.getVNINfoAt(UseIdx) == ParentVNI.
bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx);
diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp
index b16694eafd90e..40df8c4415887 100644
--- a/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -124,6 +124,17 @@ bool VirtRegAuxInfo::isRematerializable(const LiveInterval &LI,
if (!TII.isTriviallyReMaterializable(*MI))
return false;
+
+ // If MI has register uses, it will only be rematerializable if its uses are
+ // also live at the indices it will be rematerialized at.
+ const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
+ for (MachineInstr &Use : MRI.use_instructions(Reg)) {
+ SlotIndex UseIdx = LIS.getInstructionIndex(Use);
+ if (LI.getVNInfoAt(UseIdx) != VNI)
+ continue;
+ if (!LIS.allUsesAvailableAt(*MI, UseIdx))
+ return false;
+ }
}
return true;
}
diff --git a/llvm/lib/CodeGen/LiveIntervals.cpp b/llvm/lib/CodeGen/LiveIntervals.cpp
index 3485a27335f13..24c54fd8ed64b 100644
--- a/llvm/lib/CodeGen/LiveIntervals.cpp
+++ b/llvm/lib/CodeGen/LiveIntervals.cpp
@@ -34,6 +34,7 @@
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
@@ -1820,3 +1821,54 @@ void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
LICalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
LICalc->constructMainRangeFromSubranges(LI);
}
+
+bool LiveIntervals::allUsesAvailableAt(const MachineInstr &MI,
+ SlotIndex UseIdx) const {
+ SlotIndex OrigIdx = getInstructionIndex(MI).getRegSlot(true);
+ UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true));
+ for (const MachineOperand &MO : MI.operands()) {
+ if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
+ continue;
+
+ // We can't remat physreg uses, unless it is a constant or target wants
+ // to ignore this use.
+ if (MO.getReg().isPhysical()) {
+ if (MRI->isConstantPhysReg(MO.getReg()) || TII->isIgnorableUse(MO))
+ continue;
+ return false;
+ }
+
+ const LiveInterval &li = getInterval(MO.getReg());
+ const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
+ if (!OVNI)
+ continue;
+
+ // Don't allow rematerialization immediately after the original def.
+ // It would be incorrect if OrigMI redefines the register.
+ // See PR14098.
+ if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
+ return false;
+
+ if (OVNI != li.getVNInfoAt(UseIdx))
+ return false;
+
+ // Check that subrange is live at UseIdx.
+ if (li.hasSubRanges()) {
+ const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+ unsigned SubReg = MO.getSubReg();
+ LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
+ : MRI->getMaxLaneMaskForVReg(MO.getReg());
+ for (const LiveInterval::SubRange &SR : li.subranges()) {
+ if ((SR.LaneMask & LM).none())
+ continue;
+ if (!SR.liveAt(UseIdx))
+ return false;
+ // Early exit if all used lanes are checked. No need to continue.
+ LM &= ~SR.LaneMask;
+ if (LM.none())
+ break;
+ }
+ }
+ }
+ return true;
+}
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 5514e4eb6cf3e..e08451d124606 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -101,60 +101,6 @@ bool LiveRangeEdit::anyRematerializable() {
return !Remattable.empty();
}
-/// allUsesAvailableAt - Return true if all registers used by OrigMI at
-/// OrigIdx are also available with the same value at UseIdx.
-bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI,
- SlotIndex OrigIdx,
- SlotIndex UseIdx) const {
- OrigIdx = OrigIdx.getRegSlot(true);
- UseIdx = std::max(UseIdx, UseIdx.getRegSlot(true));
- for (const MachineOperand &MO : OrigMI->operands()) {
- if (!MO.isReg() || !MO.getReg() || !MO.readsReg())
- continue;
-
- // We can't remat physreg uses, unless it is a constant or target wants
- // to ignore this use.
- if (MO.getReg().isPhysical()) {
- if (MRI.isConstantPhysReg(MO.getReg()) || TII.isIgnorableUse(MO))
- continue;
- return false;
- }
-
- LiveInterval &li = LIS.getInterval(MO.getReg());
- const VNInfo *OVNI = li.getVNInfoAt(OrigIdx);
- if (!OVNI)
- continue;
-
- // Don't allow rematerialization immediately after the original def.
- // It would be incorrect if OrigMI redefines the register.
- // See PR14098.
- if (SlotIndex::isSameInstr(OrigIdx, UseIdx))
- return false;
-
- if (OVNI != li.getVNInfoAt(UseIdx))
- return false;
-
- // Check that subrange is live at UseIdx.
- if (li.hasSubRanges()) {
- const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
- unsigned SubReg = MO.getSubReg();
- LaneBitmask LM = SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
- : MRI.getMaxLaneMaskForVReg(MO.getReg());
- for (LiveInterval::SubRange &SR : li.subranges()) {
- if ((SR.LaneMask & LM).none())
- continue;
- if (!SR.liveAt(UseIdx))
- return false;
- // Early exit if all used lanes are checked. No need to continue.
- LM &= ~SR.LaneMask;
- if (LM.none())
- break;
- }
- }
- }
- return true;
-}
-
bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI,
SlotIndex UseIdx) {
assert(ScannedRemattable && "Call anyRematerializable first");
@@ -164,12 +110,10 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI,
return false;
// No defining instruction provided.
- SlotIndex DefIdx;
assert(RM.OrigMI && "No defining instruction for remattable value");
- DefIdx = LIS.getInstructionIndex(*RM.OrigMI);
// Verify that all used registers are available with the same values.
- if (!allUsesAvailableAt(RM.OrigMI, DefIdx, UseIdx))
+ if (!LIS.allUsesAvailableAt(*RM.OrigMI, UseIdx))
return false;
return true;
@@ -230,8 +174,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
// Since we're moving the DefMI load, make sure we're not extending any live
// ranges.
- if (!allUsesAvailableAt(DefMI, LIS.getInstructionIndex(*DefMI),
- LIS.getInstructionIndex(*UseMI)))
+ if (!LIS.allUsesAvailableAt(*DefMI, LIS.getInstructionIndex(*UseMI)))
return false;
// We also need to make sure it is safe to move the load.
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index fba27e3d548cf..ee18a426c1b12 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2025,7 +2025,8 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
@@ -2036,48 +2037,47 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: sub a3, a0, a1
; RV32-NEXT: addi a2, a2, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: vmv.v.x v24, a2
; RV32-NEXT: sltu a2, a0, a3
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
-; RV32-NEXT: vand.vv v24, v24, v8, v0.t
-; RV32-NEXT: vsub.vv v16, v16, v24, v0.t
+; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v24, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vand.vv v16, v8, v24, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
-; RV32-NEXT: vadd.vv v16, v24, v16, v0.t
-; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
+; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
@@ -2098,32 +2098,32 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
; RV32-NEXT: vmv1r.v v0, v7
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vand.vv v24, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v24, v8, v24, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v16, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vand.vv v8, v24, v16, v0.t
-; RV32-NEXT: vsrl.vi v24, v24, 2, v0.t
-; RV32-NEXT: vand.vv v24, v24, v16, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vand.vv v8, v16, v24, v0.t
+; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
@@ -2263,21 +2263,21 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v0, (a4) # vscale x 64-byte Folded Spill
; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vsub.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v24, v16, v24
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v16, v0
-; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v16, v24, v0
+; RV32-NEXT: vsrl.vi v24, v24, 2
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v0, (a3) # vscale x 64-byte Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v0
+; RV32-NEXT: vand.vv v24, v24, v0
+; RV32-NEXT: vadd.vv v24, v16, v24
+; RV32-NEXT: vsrl.vi v16, v24, 4
; RV32-NEXT: vadd.vv v16, v24, v16
-; RV32-NEXT: vsrl.vi v24, v16, 4
-; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a3, a3, -241
@@ -2312,16 +2312,16 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
; RV32-NEXT: vand.vv v24, v24, v0
-; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: vsub.vv v24, v8, v24
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vand.vv v8, v24, v0
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v0
+; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 4
; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 6bf882fe47fef..52eaa51051631 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2193,7 +2193,8 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v7, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
@@ -2207,49 +2208,48 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a3, 349525
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vadd.vi v24, v16, -1, v0.t
+; RV32-NEXT: vadd.vi v8, v16, -1, v0.t
; RV32-NEXT: vnot.v v16, v16, v0.t
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v24, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
-; RV32-NEXT: vand.vv v24, v24, v8, v0.t
-; RV32-NEXT: vsub.vv v16, v16, v24, v0.t
+; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v24, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vand.vv v16, v8, v24, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
-; RV32-NEXT: vadd.vv v16, v24, v16, v0.t
-; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
+; RV32-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
@@ -2270,35 +2270,35 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
; RV32-NEXT: vmv1r.v v0, v7
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vadd.vi v24, v8, -1, v0.t
+; RV32-NEXT: vadd.vi v16, v8, -1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; RV32-NEXT: vand.vv v24, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NE...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/159180