[llvm] r349003 - [AMDGPU] Simplify negated condition
Stanislav Mekhanoshin via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 12 19:17:40 PST 2018
Author: rampitec
Date: Wed Dec 12 19:17:40 2018
New Revision: 349003
URL: http://llvm.org/viewvc/llvm-project?rev=349003&view=rev
Log:
[AMDGPU] Simplify negated condition
Optimize sequence:
%sel = V_CNDMASK_B32_e64 0, 1, %cc
%cmp = V_CMP_NE_U32 1, %sel
$vcc = S_AND_B64 $exec, %cmp
S_CBRANCH_VCC[N]Z
=>
$vcc = S_ANDN2_B64 $exec, %cc
S_CBRANCH_VCC[N]Z
This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
rebuildSetCC().
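For illustration, here is a minimal lane-wise model of the fold (standalone
C++, not part of the patch; registers are reduced to single-bit lane values,
which is a simplifying assumption):

  // Check that vcc = exec & (cndmask(0,1,cc) != 1) equals
  // vcc = exec & ~cc for every lane state.
  #include <cassert>

  int main() {
    for (int exec = 0; exec <= 1; ++exec) {
      for (int cc = 0; cc <= 1; ++cc) {
        // V_CNDMASK_B32 writes 0 to inactive lanes, hence sel = exec & cc.
        int sel = exec & cc;          // V_CNDMASK_B32_e64 0, 1, cc
        int cmp = (sel != 1) ? 1 : 0; // V_CMP_NE_U32 1, sel
        int vcc = exec & cmp;         // S_AND_B64 exec, cmp
        int folded = exec & (cc ^ 1); // S_ANDN2_B64 exec, cc
        assert(vcc == folded);
      }
    }
    return 0;
  }

The S_AND_B64 with exec is what makes the fold sound: an inactive lane has
sel = 0 and therefore cmp = 1, and only the exec mask cancels that spurious
bit.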
Differential Revision: https://reviews.llvm.org/D55402
Added:
llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond.ll
Modified:
llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h
Modified: llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp?rev=349003&r1=349002&r2=349003&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp Wed Dec 12 19:17:40 2018
@@ -103,6 +103,122 @@ static MachineInstr* getOrExecSource(con
return SaveExecInst;
}
+// Optimize sequence
+// %sel = V_CNDMASK_B32_e64 0, 1, %cc
+// %cmp = V_CMP_NE_U32 1, %sel
+// $vcc = S_AND_B64 $exec, %cmp
+// S_CBRANCH_VCC[N]Z
+// =>
+// $vcc = S_ANDN2_B64 $exec, %cc
+// S_CBRANCH_VCC[N]Z
+//
+// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
+// rebuildSetCC(). We start the search from the S_CBRANCH to avoid an
+// exhaustive scan, but only the first 3 instructions are really needed.
+// The S_AND_B64 with exec is a required part of the pattern since
+// V_CNDMASK_B32 writes zeroes for inactive lanes.
+//
+// Returns %cc register on success.
+static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
+ const GCNSubtarget &ST,
+ MachineRegisterInfo &MRI,
+ LiveIntervals *LIS) {
+ const SIRegisterInfo *TRI = ST.getRegisterInfo();
+ const SIInstrInfo *TII = ST.getInstrInfo();
+ const unsigned AndOpc = AMDGPU::S_AND_B64;
+ const unsigned Andn2Opc = AMDGPU::S_ANDN2_B64;
+ const unsigned CondReg = AMDGPU::VCC;
+ const unsigned ExecReg = AMDGPU::EXEC;
+
+ auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
+ unsigned Opc = MI.getOpcode();
+ return Opc == AMDGPU::S_CBRANCH_VCCZ ||
+ Opc == AMDGPU::S_CBRANCH_VCCNZ; });
+ if (I == MBB.terminators().end())
+ return AMDGPU::NoRegister;
+
+ auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
+ *I, MRI, LIS);
+ if (!And || And->getOpcode() != AndOpc ||
+ !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
+ return AMDGPU::NoRegister;
+
+ MachineOperand *AndCC = &And->getOperand(1);
+ unsigned CmpReg = AndCC->getReg();
+ unsigned CmpSubReg = AndCC->getSubReg();
+ if (CmpReg == ExecReg) {
+ AndCC = &And->getOperand(2);
+ CmpReg = AndCC->getReg();
+ CmpSubReg = AndCC->getSubReg();
+ } else if (And->getOperand(2).getReg() != ExecReg) {
+ return AMDGPU::NoRegister;
+ }
+
+ auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
+ if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
+ Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
+ Cmp->getParent() != And->getParent())
+ return AMDGPU::NoRegister;
+
+ MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
+ MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
+ if (Op1->isImm() && Op2->isReg())
+ std::swap(Op1, Op2);
+ if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
+ return AMDGPU::NoRegister;
+
+ unsigned SelReg = Op1->getReg();
+ auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
+ if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
+ return AMDGPU::NoRegister;
+
+ Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
+ Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
+ MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
+ if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
+ Op1->getImm() != 0 || Op2->getImm() != 1)
+ return AMDGPU::NoRegister;
+
+ LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t'
+ << *Cmp << '\t' << *And);
+
+ unsigned CCReg = CC->getReg();
+ LIS->RemoveMachineInstrFromMaps(*And);
+ MachineInstr *Andn2 = BuildMI(MBB, *And, And->getDebugLoc(),
+ TII->get(Andn2Opc), And->getOperand(0).getReg())
+ .addReg(ExecReg)
+ .addReg(CCReg, 0, CC->getSubReg());
+ And->eraseFromParent();
+ LIS->InsertMachineInstrInMaps(*Andn2);
+
+ LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');
+
+ // Try to remove the compare. The Cmp value must not be used between the
+ // cmp and s_and_b64 if it is VCC, or must simply be unused otherwise.
+ if ((TargetRegisterInfo::isVirtualRegister(CmpReg) &&
+ MRI.use_nodbg_empty(CmpReg)) ||
+ (CmpReg == CondReg &&
+ std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
+ [TRI, CondReg](const MachineInstr &MI) {
+ return MI.readsRegister(CondReg, TRI); }))) {
+ LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
+
+ LIS->RemoveMachineInstrFromMaps(*Cmp);
+ Cmp->eraseFromParent();
+
+ // Try to remove v_cndmask_b32.
+ if (TargetRegisterInfo::isVirtualRegister(SelReg) &&
+ MRI.use_nodbg_empty(SelReg)) {
+ LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');
+
+ LIS->RemoveMachineInstrFromMaps(*Sel);
+ Sel->eraseFromParent();
+ }
+ }
+
+ return CCReg;
+}
+
bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
@@ -117,6 +233,14 @@ bool SIOptimizeExecMaskingPreRA::runOnMa
for (MachineBasicBlock &MBB : MF) {
+ if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
+ RecalcRegs.insert(Reg);
+ RecalcRegs.insert(AMDGPU::VCC_LO);
+ RecalcRegs.insert(AMDGPU::VCC_HI);
+ RecalcRegs.insert(AMDGPU::SCC);
+ Changed = true;
+ }
+
// Try to remove unneeded instructions before s_endpgm.
if (MBB.succ_empty()) {
if (MBB.empty())
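As an aside, the guard on the compare erasure above can be sketched as a
small standalone predicate (plain C++; the booleans stand in for the
isVirtualRegister, use_nodbg_empty and readsRegister queries, so the helper
name and shape are hypothetical):

  #include <cassert>
  #include <vector>

  // ReadsVCCBetween holds, per instruction strictly between the V_CMP and
  // the new S_ANDN2, whether that instruction reads $vcc.
  bool canEraseCmp(bool CmpIsVirtual, bool CmpHasNonDbgUses, bool CmpIsVCC,
                   const std::vector<bool> &ReadsVCCBetween) {
    if (CmpIsVirtual && !CmpHasNonDbgUses)
      return true;                      // dead virtual register
    if (CmpIsVCC) {
      for (bool Reads : ReadsVCCBetween)
        if (Reads)
          return false;                 // $vcc is consumed before the S_ANDN2
      return true;
    }
    return false;
  }

  int main() {
    assert(canEraseCmp(true, false, false, {}));     // unused virtual reg
    assert(canEraseCmp(false, true, true, {false})); // $vcc, never read
    assert(!canEraseCmp(false, true, true, {true})); // $vcc copied out first
  }

The last case corresponds to the negated_cond_vop2_used_vcc test below, where
the fold still happens but the V_CMP must stay.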
Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?rev=349003&r1=349002&r2=349003&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp Wed Dec 12 19:17:40 2018
@@ -18,9 +18,12 @@
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
@@ -1599,3 +1602,57 @@ SIRegisterInfo::getConstrainedRegClassFo
llvm_unreachable("not implemented");
}
}
+
+// Find the reaching register definition.
+MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
+ MachineInstr &Use,
+ MachineRegisterInfo &MRI,
+ LiveIntervals *LIS) const {
+ auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
+ SlotIndex UseIdx = LIS->getInstructionIndex(Use);
+ SlotIndex DefIdx;
+
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (!LIS->hasInterval(Reg))
+ return nullptr;
+ LiveInterval &LI = LIS->getInterval(Reg);
+ LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
+ : MRI.getMaxLaneMaskForVReg(Reg);
+ VNInfo *V = nullptr;
+ if (LI.hasSubRanges()) {
+ for (auto &S : LI.subranges()) {
+ if ((S.LaneMask & SubLanes) == SubLanes) {
+ V = S.getVNInfoAt(UseIdx);
+ break;
+ }
+ }
+ } else {
+ V = LI.getVNInfoAt(UseIdx);
+ }
+ if (!V)
+ return nullptr;
+ DefIdx = V->def;
+ } else {
+ // Find last def.
+ for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
+ LiveRange &LR = LIS->getRegUnit(*Units);
+ if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
+ if (!DefIdx.isValid() ||
+ MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
+ LIS->getInstructionFromIndex(V->def)))
+ DefIdx = V->def;
+ } else {
+ return nullptr;
+ }
+ }
+ }
+
+ MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);
+
+ if (!Def || !MDT.dominates(Def, &Use))
+ return nullptr;
+
+ assert(Def->modifiesRegister(Reg, this));
+
+ return Def;
+}
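The physical-register branch of findReachingDef can be modeled in isolation
(a sketch assuming a single basic block, so dominance reduces to instruction
order, and with plain integers standing in for SlotIndexes):

  #include <cassert>
  #include <vector>

  // Given the reaching-def index of each register unit at the use (-1 if a
  // unit has no live value there), return the latest def, or -1 on failure.
  int latestReachingDef(const std::vector<int> &PerUnitDefIdx) {
    int DefIdx = -1;
    for (int V : PerUnitDefIdx) {
      if (V < 0)
        return -1;                  // mirrors the early `return nullptr`
      if (DefIdx < 0 || DefIdx < V) // earlier index == dominates here
        DefIdx = V;
    }
    return DefIdx;
  }

  int main() {
    assert(latestReachingDef({3, 7}) == 7);   // $vcc_lo def, then $vcc_hi def
    assert(latestReachingDef({5, -1}) == -1); // one unit undefined at the use
  }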
Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h?rev=349003&r1=349002&r2=349003&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.h Wed Dec 12 19:17:40 2018
@@ -228,6 +228,12 @@ public:
getConstrainedRegClassForOperand(const MachineOperand &MO,
const MachineRegisterInfo &MRI) const override;
+ // Find the reaching register definition.
+ MachineInstr *findReachingDef(unsigned Reg, unsigned SubReg,
+ MachineInstr &Use,
+ MachineRegisterInfo &MRI,
+ LiveIntervals *LIS) const;
+
private:
void buildSpillLoadStore(MachineBasicBlock::iterator MI,
unsigned LoadStoreOp,
Added: llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir?rev=349003&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir Wed Dec 12 19:17:40 2018
@@ -0,0 +1,465 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=si-optimize-exec-masking-pre-ra -o - %s | FileCheck -check-prefix=GCN %s
+
+# GCN: name: negated_cond_vop2
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_redef_vcc1
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+# GCN-NEXT: $vcc_lo = COPY $sgpr0
+# GCN-NEXT: $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2_redef_vcc1
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc_lo = COPY $sgpr0
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_redef_vcc2
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+# GCN-NEXT: $vcc_hi = COPY $sgpr0
+# GCN-NEXT: $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2_redef_vcc2
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc_hi = COPY $sgpr0
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_redef_cmp
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+# GCN-NEXT: %2.sub1:sreg_64_xexec = COPY $sgpr0
+# GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_redef_cmp
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+ %2.sub1 = COPY $sgpr0
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_undef_vcc
+# GCN: $vcc = S_AND_B64 $exec, undef $vcc, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_undef_vcc
+body: |
+ bb.0:
+ $vcc = S_AND_B64 $exec, undef $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_imp_vcc
+# GCN: $vcc = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, $vcc, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_imp_vcc
+body: |
+ bb.0:
+ $vcc = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, $vcc, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_imp_vcc
+# GCN: $vcc = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, $vcc, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2_imp_vcc
+body: |
+ bb.0:
+ $vcc = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, $vcc, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc = S_AND_B64 killed $vcc, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_redef_sel
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: %1:vgpr_32 = COPY $vgpr0
+# GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+# GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_redef_sel
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %1:vgpr_32 = COPY $vgpr0
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_used_sel
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2_used_sel
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ $vgpr0 = COPY %1
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_used_vcc
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+# GCN-NEXT: $sgpr0_sgpr1 = COPY $vcc
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop2_used_vcc
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $sgpr0_sgpr1 = COPY $vcc
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_sel_wrong_subreg1
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1.sub1:vreg_64 = IMPLICIT_DEF
+# GCN-NEXT: %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
+# GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_sel_wrong_subreg1
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1.sub1 = IMPLICIT_DEF
+ %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_sel_wrong_subreg2
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: %1.sub1:vreg_64 = IMPLICIT_DEF
+# GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
+# GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_sel_wrong_subreg2
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %1.sub1 = IMPLICIT_DEF
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_sel_right_subreg1
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1.sub1:vreg_64 = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_sel_right_subreg1
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1.sub1 = IMPLICIT_DEF
+ %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_sel_right_subreg2
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1.sub1:vreg_64 = IMPLICIT_DEF
+# GCN-NEXT: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_sel_right_subreg2
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %1.sub1 = IMPLICIT_DEF
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop3_sel_subreg_overlap
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN-NEXT: %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN-NEXT: %1.sub2_sub3:vreg_128 = IMPLICIT_DEF
+# GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub2, 1, implicit $exec
+# GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
+---
+name: negated_cond_vop3_sel_subreg_overlap
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %1.sub2_sub3 = IMPLICIT_DEF
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub2, 1, implicit $exec
+ $vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ S_BRANCH %bb.0
+
+ bb.2:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_dominated_blocks
+# GCN: %0:sreg_64_xexec = IMPLICIT_DEF
+# GCN: $vcc = S_ANDN2_B64 $exec, %0, implicit-def $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+---
+name: negated_cond_vop2_dominated_blocks
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+
+ bb.1:
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.3, implicit killed $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_BRANCH %bb.1
+
+ bb.3:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_different_blocks_cmp_and
+# GCN: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+# GCN: $vcc = S_AND_B64 $exec, %2, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+---
+name: negated_cond_vop2_different_blocks_cmp_and
+body: |
+ bb.0:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+ %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+
+ bb.1:
+ $vcc = S_AND_B64 $exec, killed %2, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.3, implicit killed $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_BRANCH %bb.1
+
+ bb.3:
+ S_ENDPGM
+...
+
+# GCN: name: negated_cond_vop2_not_dominated_blocks
+# GCN: V_CNDMASK_B32_e64 0, 1,
+# GCN: $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
+# GCN-NEXT: S_CBRANCH_VCCNZ %bb.4, implicit $vcc
+---
+name: negated_cond_vop2_not_dominated_blocks
+body: |
+ bb.0:
+ $vcc = IMPLICIT_DEF
+ %1 = IMPLICIT_DEF
+ S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ %0:sreg_64_xexec = IMPLICIT_DEF
+ %1:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %0, implicit $exec
+
+ bb.2:
+ V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
+ $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.4, implicit killed $vcc
+ S_BRANCH %bb.3
+
+ bb.3:
+ S_BRANCH %bb.2
+
+ bb.4:
+ S_ENDPGM
+...
Added: llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond.ll?rev=349003&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/optimize-negated-cond.ll Wed Dec 12 19:17:40 2018
@@ -0,0 +1,75 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}negated_cond:
+; GCN: BB0_1:
+; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
+; GCN: BB0_2:
+; GCN-NOT: v_cndmask_b32
+; GCN-NOT: v_cmp
+; GCN: s_andn2_b64 vcc, exec, [[CC]]
+; GCN: s_cbranch_vccnz BB0_4
+define amdgpu_kernel void @negated_cond(i32 addrspace(1)* %arg1) {
+bb:
+ br label %bb1
+
+bb1:
+ %tmp1 = load i32, i32 addrspace(1)* %arg1
+ %tmp2 = icmp eq i32 %tmp1, 0
+ br label %bb2
+
+bb2:
+ %tmp3 = phi i32 [ 0, %bb1 ], [ %tmp6, %bb4 ]
+ %tmp4 = shl i32 %tmp3, 5
+ br i1 %tmp2, label %bb3, label %bb4
+
+bb3:
+ %tmp5 = add i32 %tmp4, 1
+ br label %bb4
+
+bb4:
+ %tmp6 = phi i32 [ %tmp5, %bb3 ], [ %tmp4, %bb2 ]
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp6
+ store i32 0, i32 addrspace(1)* %gep
+ %tmp7 = icmp eq i32 %tmp6, 32
+ br i1 %tmp7, label %bb1, label %bb2
+}
+
+; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
+; GCN: v_cmp_eq_u32_e64 [[CC:[^,]+]],
+; GCN: BB1_1:
+; GCN-NOT: v_cndmask_b32
+; GCN-NOT: v_cmp
+; GCN: s_andn2_b64 vcc, exec, [[CC]]
+; GCN: s_cbranch_vccz BB1_3
+define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
+bb:
+ br label %bb2
+
+bb2:
+ %tmp1 = load i32, i32 addrspace(1)* %arg1
+ %tmp2 = icmp eq i32 %tmp1, 0
+ br label %bb4
+
+bb3:
+ ret void
+
+bb4:
+ %tmp3 = phi i32 [ 0, %bb2 ], [ %tmp7, %bb7 ]
+ %tmp4 = shl i32 %tmp3, 5
+ br i1 %tmp2, label %bb5, label %bb6
+
+bb5:
+ %tmp5 = add i32 %tmp4, 1
+ br label %bb7
+
+bb6:
+ %tmp6 = add i32 %tmp3, 1
+ br label %bb7
+
+bb7:
+ %tmp7 = phi i32 [ %tmp5, %bb5 ], [ %tmp6, %bb6 ]
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tmp7
+ store i32 0, i32 addrspace(1)* %gep
+ %tmp8 = icmp eq i32 %tmp7, 32
+ br i1 %tmp8, label %bb3, label %bb4
+}