[clang] [RISCV] Eliminate dead li after emitting VSETVLIs (PR #65934)
Yingwei Zheng via cfe-commits
cfe-commits at lists.llvm.org
Sun Oct 1 04:23:37 PDT 2023
https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/65934
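Background for the change: li rd, imm is an alias for addi rd, x0, imm, so when this pass erases a register AVL operand (replacing it with NoRegister), the li that materialized the AVL can be left dead. A before/after sketch, reconstructed from the fixed-vectors-masked-gather.ll updates below:

  # before: a3 materializes an AVL of 32 that is never read again
  lbu      a2, 0(a2)
  li       a3, 32
  vmv.s.x  v12, a2
  vsetivli zero, 2, e8, m2, tu, ma
  vslideup.vi v10, v12, 1

  # after: the dead li is erased by the pass
  lbu      a2, 0(a2)
  vmv.s.x  v12, a2
  vsetivli zero, 2, e8, m2, tu, ma
  vslideup.vi v10, v12, 1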
From 2fe5756dd4d49580d3a23b0ff1b72535f725915e Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Mon, 11 Sep 2023 15:51:46 +0800
Subject: [PATCH 1/4] [RISCV] Eliminate dead li after emitting VSETVLIs
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 22 ++++++++++++++---
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 24 -------------------
2 files changed, 19 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index b42ad269c18de6f..918c96beb29afca 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -757,7 +757,8 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
bool computeVLVTYPEChanges(const MachineBasicBlock &MBB,
VSETVLIInfo &Info) const;
void computeIncomingVLVTYPE(const MachineBasicBlock &MBB);
- void emitVSETVLIs(MachineBasicBlock &MBB);
+ void emitVSETVLIs(MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineInstr *> &DeadVLInstrs);
void doLocalPostpass(MachineBasicBlock &MBB);
void doPRE(MachineBasicBlock &MBB);
void insertReadVL(MachineBasicBlock &MBB);
@@ -1216,7 +1217,8 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
return false;
}
-void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
+void RISCVInsertVSETVLI::emitVSETVLIs(
+ MachineBasicBlock &MBB, SmallVectorImpl<MachineInstr *> &DeadVLInstrs) {
VSETVLIInfo CurInfo = BlockInfo[MBB.getNumber()].Pred;
// Track whether the prefix of the block we've scanned is transparent
// (meaning has not yet changed the abstract state).
@@ -1255,6 +1257,13 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
if (VLOp.isReg()) {
// Erase the AVL operand from the instruction.
+ if (MachineInstr *MI = MRI->getVRegDef(VLOp.getReg());
+ MI && MI->getOpcode() == RISCV::ADDI &&
+ MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
+ MI->getOperand(1).getReg() == RISCV::X0 &&
+ MI->getOperand(2).getImm() != 0)
+ DeadVLInstrs.push_back(MI);
+
VLOp.setReg(RISCV::NoRegister);
VLOp.setIsKill(false);
}
@@ -1580,8 +1589,9 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
// Phase 2 information to avoid adding vsetvlis before the first vector
// instruction in the block if the VL/VTYPE is satisfied by its
// predecessors.
+ SmallVector<MachineInstr *, 16> DeadVLInstrs;
for (MachineBasicBlock &MBB : MF)
- emitVSETVLIs(MBB);
+ emitVSETVLIs(MBB, DeadVLInstrs);
// Now that all vsetvlis are explicit, go through and do block local
// DSE and peephole based demanded fields based transforms. Note that
@@ -1592,6 +1602,12 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
doLocalPostpass(MBB);
+ // Remove dead LI instructions that set VL.
+ for (MachineInstr *MI : DeadVLInstrs) {
+ if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
+ MI->eraseFromParent();
+ }
+
// Once we're fully done rewriting all the instructions, do a final pass
// through to check for VSETVLIs which write to an unused destination.
// For the non X0, X0 variant, we can replace the destination register
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index f7352b4659e5a9b..0d306775528ed86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -12393,7 +12393,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
@@ -12431,7 +12430,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v14
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5
@@ -12455,7 +12453,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 9
@@ -12470,7 +12467,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 10
@@ -12483,7 +12479,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 11
@@ -12496,7 +12491,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 12
@@ -12509,7 +12503,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 13
@@ -12533,7 +12526,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 18, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 17
@@ -12571,7 +12563,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 21
@@ -12595,7 +12586,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 26, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 25
@@ -12621,7 +12611,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29
@@ -12634,7 +12623,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 31, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 30
@@ -12659,7 +12647,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2
@@ -12671,7 +12658,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 3
@@ -12682,7 +12668,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 6
@@ -12694,7 +12679,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 7
@@ -12717,7 +12701,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 14
@@ -12729,7 +12712,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 15
@@ -12752,7 +12734,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
; RV64ZVE32F-NEXT: vsetivli zero, 19, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 18
@@ -12764,7 +12745,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 19
@@ -12775,7 +12755,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 23, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 22
@@ -12787,7 +12766,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 24, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 23
@@ -12810,7 +12788,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 27, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 26
@@ -12822,7 +12799,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
-; RV64ZVE32F-NEXT: li a3, 32
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vsetivli zero, 28, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 27
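At the MI level, li rd, simm12 is expanded to ADDI rd, x0, simm12, which is exactly what the new check in this patch matches; the AVL-defining instruction is only recorded as possibly dead here and erased later, once all uses are gone. As a standalone illustration of the predicate (the helper name is hypothetical; patch 4 below factors the same logic into RISCV::isLoadSImm12):

  // Sketch: does MI materialize a non-zero simm12, i.e. li rd, simm12
  // lowered as addi rd, x0, simm12? (needs MachineInstr.h and the
  // RISCV opcode/register enums)
  static bool isNonZeroLoadImm(const MachineInstr &MI) {
    return MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
           MI.getOperand(1).getReg() == RISCV::X0 &&
           MI.getOperand(2).isImm() && MI.getOperand(2).getImm() != 0;
  }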
From 05140d093dcd5bb1f3d05edb302e6977b1fd77df Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Mon, 11 Sep 2023 22:17:51 +0800
Subject: [PATCH 2/4] Remove unnecessary checks
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 918c96beb29afca..ecb2af581fd65cb 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1259,7 +1259,6 @@ void RISCVInsertVSETVLI::emitVSETVLIs(
// Erase the AVL operand from the instruction.
if (MachineInstr *MI = MRI->getVRegDef(VLOp.getReg());
MI && MI->getOpcode() == RISCV::ADDI &&
- MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
MI->getOperand(1).getReg() == RISCV::X0 &&
MI->getOperand(2).getImm() != 0)
DeadVLInstrs.push_back(MI);
From 45d7f86ea9ca544816cebfd031b4ba4e961e4280 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Mon, 11 Sep 2023 23:43:51 +0800
Subject: [PATCH 3/4] Rename `DeadVLInstrs` to `PossiblyDeadVLInstrs`
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index ecb2af581fd65cb..921cc3248c7f043 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -758,7 +758,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
VSETVLIInfo &Info) const;
void computeIncomingVLVTYPE(const MachineBasicBlock &MBB);
void emitVSETVLIs(MachineBasicBlock &MBB,
- SmallVectorImpl<MachineInstr *> &DeadVLInstrs);
+ SmallVectorImpl<MachineInstr *> &PossiblyDeadVLInstrs);
void doLocalPostpass(MachineBasicBlock &MBB);
void doPRE(MachineBasicBlock &MBB);
void insertReadVL(MachineBasicBlock &MBB);
@@ -1218,7 +1218,8 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
}
void RISCVInsertVSETVLI::emitVSETVLIs(
- MachineBasicBlock &MBB, SmallVectorImpl<MachineInstr *> &DeadVLInstrs) {
+ MachineBasicBlock &MBB,
+ SmallVectorImpl<MachineInstr *> &PossiblyDeadVLInstrs) {
VSETVLIInfo CurInfo = BlockInfo[MBB.getNumber()].Pred;
// Track whether the prefix of the block we've scanned is transparent
// (meaning has not yet changed the abstract state).
@@ -1261,7 +1262,7 @@ void RISCVInsertVSETVLI::emitVSETVLIs(
MI && MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).getReg() == RISCV::X0 &&
MI->getOperand(2).getImm() != 0)
- DeadVLInstrs.push_back(MI);
+ PossiblyDeadVLInstrs.push_back(MI);
VLOp.setReg(RISCV::NoRegister);
VLOp.setIsKill(false);
@@ -1588,9 +1589,9 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
// Phase 2 information to avoid adding vsetvlis before the first vector
// instruction in the block if the VL/VTYPE is satisfied by its
// predecessors.
- SmallVector<MachineInstr *, 16> DeadVLInstrs;
+ SmallVector<MachineInstr *, 16> PossiblyDeadVLInstrs;
for (MachineBasicBlock &MBB : MF)
- emitVSETVLIs(MBB, DeadVLInstrs);
+ emitVSETVLIs(MBB, PossiblyDeadVLInstrs);
// Now that all vsetvlis are explicit, go through and do block local
// DSE and peephole based demanded fields based transforms. Note that
@@ -1602,7 +1603,7 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
doLocalPostpass(MBB);
// Remove dead LI instructions that set VL.
- for (MachineInstr *MI : DeadVLInstrs) {
+ for (MachineInstr *MI : PossiblyDeadVLInstrs) {
if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
MI->eraseFromParent();
}
From c7801fea77a8b7ecb575ca6ee36b3f2e9e8d12a9 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Mon, 25 Sep 2023 05:17:02 +0800
Subject: [PATCH 4/4] fixup! [RISCV] Eliminate dead li after emitting VSETVLIs
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 11 +++--------
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 8 ++++++++
llvm/lib/Target/RISCV/RISCVInstrInfo.h | 2 ++
3 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index fd779474297d30e..2a193349c859957 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -491,10 +491,7 @@ class VSETVLIInfo {
if (getAVLReg() == RISCV::X0)
return true;
if (MachineInstr *MI = MRI.getVRegDef(getAVLReg());
- MI && MI->getOpcode() == RISCV::ADDI &&
- MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
- MI->getOperand(1).getReg() == RISCV::X0 &&
- MI->getOperand(2).getImm() != 0)
+ MI && RISCV::isLoadSImm12(*MI, /*NonZero*/ true))
return true;
return false;
}
@@ -1257,13 +1254,11 @@ void RISCVInsertVSETVLI::emitVSETVLIs(
if (RISCVII::hasVLOp(TSFlags)) {
MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
if (VLOp.isReg()) {
- // Erase the AVL operand from the instruction.
if (MachineInstr *MI = MRI->getVRegDef(VLOp.getReg());
- MI && MI->getOpcode() == RISCV::ADDI &&
- MI->getOperand(1).getReg() == RISCV::X0 &&
- MI->getOperand(2).getImm() != 0)
+ MI && RISCV::isLoadSImm12(*MI, /*NonZero*/ true))
PossiblyDeadVLInstrs.push_back(MI);
+ // Erase the AVL operand from the instruction.
VLOp.setReg(RISCV::NoRegister);
VLOp.setIsKill(false);
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 816ceaf95607e71..aa5d50d21630ca6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2752,6 +2752,14 @@ bool RISCV::isZEXT_B(const MachineInstr &MI) {
MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
+// Returns true if this is the li rd, simm12 pattern, i.e. addi rd, x0, simm12.
+bool RISCV::isLoadSImm12(const MachineInstr &MI, bool NonZero) {
+ return MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
+ MI.getOperand(1).getReg() == RISCV::X0 &&
+ (!NonZero ||
+ (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() != 0));
+}
+
static bool isRVVWholeLoadStore(unsigned Opcode) {
switch (Opcode) {
default:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 99c907a98121ae3..f8d29cbd2df71d2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -248,6 +248,8 @@ namespace RISCV {
bool isSEXT_W(const MachineInstr &MI);
bool isZEXT_W(const MachineInstr &MI);
bool isZEXT_B(const MachineInstr &MI);
+// Returns true if this is the li rd, simm12 pattern, i.e. addi rd, x0, simm12.
+bool isLoadSImm12(const MachineInstr &MI, bool NonZero);
// Returns true if the given MI is an RVV instruction opcode for which we may
// expect to see a FrameIndex operand.
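With the helper in place, both call sites in the pass reduce to the same query. A hedged usage sketch, mirroring the two call sites in patch 4 above (MRI and AVLReg stand in for the surrounding pass state):

  // Sketch: is the AVL defined by li rd, simm12 with simm12 != 0?
  if (MachineInstr *Def = MRI.getVRegDef(AVLReg);
      Def && RISCV::isLoadSImm12(*Def, /*NonZero*/ true)) {
    // The AVL is a known non-zero constant; once the register AVL
    // operand is erased, the defining li may go dead and be removed.
  }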