[llvm] 2480a31 - [AMDGPU] SILowerControlFlow::optimizeEndCF should remove empty basic block
via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 7 09:37:51 PDT 2020
Author: alex-t
Date: 2020-09-07T19:37:27+03:00
New Revision: 2480a31e5d69a5c2e8e900be3a7f706d77f5a5cc
URL: https://github.com/llvm/llvm-project/commit/2480a31e5d69a5c2e8e900be3a7f706d77f5a5cc
DIFF: https://github.com/llvm/llvm-project/commit/2480a31e5d69a5c2e8e900be3a7f706d77f5a5cc.diff
LOG: [AMDGPU] SILowerControlFlow::optimizeEndCF should remove empty basic block
optimizeEndCF removes the EXEC-restoring instruction when it is the only instruction in its block apart from the branch to the single successor, and that successor already contains an EXEC mask restoring instruction that was lowered from the END_CF belonging to an IF_ELSE.
As a result of this optimization we are left with a basic block whose only instruction is a branch to its single successor.
If control flow can reach such an empty block via S_CBRANCH_EXECZ/EXECNZ, spill/reload instructions inserted later by the register allocator may end up placed under an exec == 0 condition and never execute.
Removing the empty block solves the problem, as sketched below.
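For illustration, this is roughly the CFG shape involved; a simplified sketch with hypothetical block numbers and placeholder values, modeled on the collapse-endcf.mir test updated below, not the exact MIR:

  bb.1:
    ...
    S_CBRANCH_EXECZ %bb.3, implicit $exec   ; branch taken only when exec == 0
  bb.2:
    ...                                     ; inner region
  bb.3:                                     ; emptied by optimizeEndCf: only the branch remains
    S_BRANCH %bb.4
  bb.4:
    $exec = S_OR_B64 $exec, %saved_exec, implicit-def $scc   ; outer END_CF restores exec

If the register allocator later inserts spill or reload code into bb.3, that code sits under an exec == 0 condition on the S_CBRANCH_EXECZ edge and effectively never executes. removeMBBifRedundant instead deletes bb.3, retargets any branches in its predecessors, and makes them successors of bb.4 directly.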
This change requires further work to re-implement the LIS updates. Currently, LIS is always nullptr in this pass; enabling it needs another patch to fix many places across the codegen.
Reviewed By: rampitec
Differential Revision: https://reviews.llvm.org/D86634
Added:
Modified:
llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 0246c6508e9f..914668f2b68a 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -113,6 +113,8 @@ class SILowerControlFlow : public MachineFunctionPass {
void combineMasks(MachineInstr &MI);
+ bool removeMBBifRedundant(MachineBasicBlock &MBB);
+
void process(MachineInstr &MI);
// Skip to the next instruction, ignoring debug instructions, and trivial
@@ -154,9 +156,6 @@ class SILowerControlFlow : public MachineFunctionPass {
AU.addPreserved<SlotIndexes>();
AU.addPreserved<LiveIntervals>();
AU.addPreservedID(LiveVariablesID);
- AU.addPreservedID(MachineLoopInfoID);
- AU.addPreservedID(MachineDominatorsID);
- AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
@@ -604,6 +603,7 @@ void SILowerControlFlow::optimizeEndCf() {
if (LIS)
LIS->RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
+ removeMBBifRedundant(MBB);
}
}
}
@@ -658,6 +658,47 @@ void SILowerControlFlow::process(MachineInstr &MI) {
}
}
+bool SILowerControlFlow::removeMBBifRedundant(MachineBasicBlock &MBB) {
+ bool Redundant = true;
+ for (auto &I : MBB.instrs()) {
+ if (!I.isDebugInstr() && !I.isUnconditionalBranch())
+ Redundant = false;
+ }
+ if (Redundant) {
+ MachineBasicBlock *Succ = *MBB.succ_begin();
+ SmallVector<MachineBasicBlock *, 2> Preds(MBB.predecessors());
+ for (auto P : Preds) {
+ P->replaceSuccessor(&MBB, Succ);
+ MachineBasicBlock::iterator I(P->getFirstInstrTerminator());
+ while (I != P->end()) {
+ if (I->isBranch()) {
+ if (TII->getBranchDestBlock(*I) == &MBB) {
+ I->getOperand(0).setMBB(Succ);
+ break;
+ }
+ }
+ I++;
+ }
+ if (I == P->end()) {
+ MachineFunction *MF = P->getParent();
+ MachineFunction::iterator InsertPt =
+ P->getNextNode() ? MachineFunction::iterator(P->getNextNode())
+ : MF->end();
+ MF->splice(InsertPt, Succ);
+ }
+ }
+ MBB.removeSuccessor(Succ);
+ if (LIS) {
+ for (auto &I : MBB.instrs())
+ LIS->RemoveMachineInstrFromMaps(I);
+ }
+ MBB.clear();
+ MBB.eraseFromParent();
+ return true;
+ }
+ return false;
+}
+
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
TII = ST.getInstrInfo();
diff --git a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
index d50973c9abf9..e87f1e7dc8dd 100644
--- a/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
+++ b/llvm/test/CodeGen/AMDGPU/collapse-endcf.mir
@@ -16,16 +16,13 @@ body: |
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_]]
; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
; GCN: bb.1:
- ; GCN: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; GCN: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], undef %3:sreg_64, implicit-def dead $scc
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_1]]
- ; GCN: S_CBRANCH_EXECZ %bb.3, implicit $exec
+ ; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
; GCN: bb.2:
- ; GCN: successors: %bb.3(0x80000000)
- ; GCN: bb.3:
; GCN: successors: %bb.4(0x80000000)
- ; GCN: DBG_VALUE
; GCN: bb.4:
; GCN: $exec = S_OR_B64 $exec, [[COPY]], implicit-def $scc
; GCN: DBG_VALUE
@@ -68,14 +65,12 @@ body: |
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_]]
; GCN: S_CBRANCH_EXECZ %bb.5, implicit $exec
; GCN: bb.1:
- ; GCN: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; GCN: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], undef %3:sreg_64, implicit-def dead $scc
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_1]]
- ; GCN: S_CBRANCH_EXECZ %bb.3, implicit $exec
+ ; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
; GCN: bb.2:
- ; GCN: successors: %bb.3(0x80000000)
- ; GCN: bb.3:
; GCN: successors: %bb.4(0x80000000)
; GCN: bb.4:
; GCN: successors: %bb.5(0x80000000)
@@ -118,14 +113,12 @@ body: |
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_]]
; GCN: S_CBRANCH_EXECZ %bb.5, implicit $exec
; GCN: bb.1:
- ; GCN: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; GCN: successors: %bb.2(0x40000000), %bb.4(0x40000000)
; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], undef %3:sreg_64, implicit-def dead $scc
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_1]]
- ; GCN: S_CBRANCH_EXECZ %bb.3, implicit $exec
+ ; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
; GCN: bb.2:
- ; GCN: successors: %bb.3(0x80000000)
- ; GCN: bb.3:
; GCN: successors: %bb.4(0x80000000)
; GCN: bb.4:
; GCN: successors: %bb.5(0x80000000)
@@ -387,22 +380,19 @@ body: |
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_]]
; GCN: S_CBRANCH_EXECZ %bb.4, implicit $exec
; GCN: bb.1:
- ; GCN: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; GCN: successors: %bb.2(0x40000000), %bb.5(0x40000000)
; GCN: [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
; GCN: [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], undef %3:sreg_64, implicit-def dead $scc
; GCN: $exec = S_MOV_B64_term killed [[S_AND_B64_1]]
- ; GCN: S_CBRANCH_EXECZ %bb.3, implicit $exec
+ ; GCN: S_CBRANCH_EXECZ %bb.5, implicit $exec
; GCN: bb.2:
- ; GCN: successors: %bb.3(0x80000000)
- ; GCN: bb.3:
; GCN: successors: %bb.5(0x80000000)
- ; GCN: S_BRANCH %bb.5
- ; GCN: bb.4:
- ; GCN: $exec = S_OR_B64 $exec, [[COPY]], implicit-def $scc
- ; GCN: S_ENDPGM 0
; GCN: bb.5:
; GCN: successors: %bb.4(0x80000000)
; GCN: S_BRANCH %bb.4
+ ; GCN: bb.4:
+ ; GCN: $exec = S_OR_B64 $exec, [[COPY]], implicit-def $scc
+ ; GCN: S_ENDPGM 0
bb.0:
successors: %bb.1, %bb.4