[llvm-branch-commits] [llvm] AMDGPU/GlobalISel: Temporal divergence lowering (non i1) (PR #124298)
Petar Avramovic via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Jan 24 08:15:32 PST 2025
https://github.com/petar-avramovic created https://github.com/llvm/llvm-project/pull/124298
Record all uses outside a cycle with a divergent exit during
propagateTemporalDivergence in the uniformity analysis.
From this list of candidates for temporal divergence lowering,
excluding known lane masks from control-flow intrinsics,
find sources inside the cycle that are uniform and not i1.
Temporal divergence lowering (non-i1):
create a copy (v_mov) to a vgpr, with an implicit use of exec (to stop
other passes from moving this copy outside of the cycle), and use this
vgpr outside of the cycle instead of the original uniform source.
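For intuition, a minimal lane-level sketch of the problem (illustrative
only, not from the patch; the lane count and trip counts are made up): a
counter that is uniform on every iteration can still be divergent when
read after the loop, because lanes exit on different iterations and each
keeps the value from its own last active iteration.

  #include <cstdio>

  int main() {
    const int TripCount[4] = {1, 4, 2, 3}; // divergent exit condition
    int Observed[4];
    for (int Lane = 0; Lane < 4; ++Lane) {
      int I = 0; // uniform across active lanes on any given iteration
      while (I < TripCount[Lane])
        ++I;
      Observed[Lane] = I; // use outside the cycle: value differs per lane
    }
    for (int Lane = 0; Lane < 4; ++Lane)
      printf("lane %d observes %d\n", Lane, Observed[Lane]);
    return 0;
  }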
From 3e04401258c91639105b1f2f17a84badbdf928ae Mon Sep 17 00:00:00 2001
From: Petar Avramovic <Petar.Avramovic at amd.com>
Date: Fri, 24 Jan 2025 16:56:30 +0100
Subject: [PATCH] AMDGPU/GlobalISel: Temporal divergence lowering (non i1)
Record all uses outside a cycle with a divergent exit during
propagateTemporalDivergence in the uniformity analysis.
From this list of candidates for temporal divergence lowering,
excluding known lane masks from control-flow intrinsics,
find sources inside the cycle that are uniform and not i1.
Temporal divergence lowering (non-i1):
create a copy (v_mov) to a vgpr, with an implicit use of exec (to stop
other passes from moving this copy outside of the cycle), and use this
vgpr outside of the cycle instead of the original uniform source.
---
llvm/include/llvm/ADT/GenericUniformityImpl.h | 37 +++++++++++++++
llvm/include/llvm/ADT/GenericUniformityInfo.h | 6 +++
llvm/lib/Analysis/UniformityAnalysis.cpp | 3 +-
.../lib/CodeGen/MachineUniformityAnalysis.cpp | 8 ++--
.../AMDGPUGlobalISelDivergenceLowering.cpp | 47 ++++++++++++++++++-
.../lib/Target/AMDGPU/AMDGPURegBankSelect.cpp | 24 ++++++++--
llvm/lib/Target/AMDGPU/SILowerI1Copies.h | 6 +++
...divergent-i1-phis-no-lane-mask-merging.mir | 7 +--
...ergence-divergent-i1-used-outside-loop.mir | 19 ++++----
.../divergence-temporal-divergent-reg.ll | 18 +++----
.../divergence-temporal-divergent-reg.mir | 3 +-
.../AMDGPU/GlobalISel/regbankselect-mui.ll | 17 +++----
12 files changed, 153 insertions(+), 42 deletions(-)
diff --git a/llvm/include/llvm/ADT/GenericUniformityImpl.h b/llvm/include/llvm/ADT/GenericUniformityImpl.h
index bd09f4fe43e087..91ee0e41332199 100644
--- a/llvm/include/llvm/ADT/GenericUniformityImpl.h
+++ b/llvm/include/llvm/ADT/GenericUniformityImpl.h
@@ -342,6 +342,10 @@ template <typename ContextT> class GenericUniformityAnalysisImpl {
typename SyncDependenceAnalysisT::DivergenceDescriptor;
using BlockLabelMapT = typename SyncDependenceAnalysisT::BlockLabelMap;
+ // Use outside cycle with divergent exit
+ using UOCWDE =
+ std::tuple<const InstructionT *, const InstructionT *, const CycleT *>;
+
GenericUniformityAnalysisImpl(const DominatorTreeT &DT, const CycleInfoT &CI,
const TargetTransformInfo *TTI)
: Context(CI.getSSAContext()), F(*Context.getFunction()), CI(CI),
@@ -395,6 +399,14 @@ template <typename ContextT> class GenericUniformityAnalysisImpl {
}
void print(raw_ostream &out) const;
+ SmallVector<UOCWDE, 8> UsesOutsideCycleWithDivergentExit;
+ void recordUseOutsideCycleWithDivergentExit(const InstructionT *,
+ const InstructionT *,
+ const CycleT *);
+ inline iterator_range<UOCWDE *> getUsesOutsideCycleWithDivergentExit() const {
+ return make_range(UsesOutsideCycleWithDivergentExit.begin(),
+ UsesOutsideCycleWithDivergentExit.end());
+ }
protected:
/// \brief Value/block pair representing a single phi input.
@@ -1129,6 +1141,14 @@ void GenericUniformityAnalysisImpl<ContextT>::compute() {
}
}
+template <typename ContextT>
+void GenericUniformityAnalysisImpl<
+ ContextT>::recordUseOutsideCycleWithDivergentExit(const InstructionT *Inst,
+ const InstructionT *User,
+ const CycleT *Cycle) {
+ UsesOutsideCycleWithDivergentExit.emplace_back(Inst, User, Cycle);
+}
+
template <typename ContextT>
bool GenericUniformityAnalysisImpl<ContextT>::isAlwaysUniform(
const InstructionT &Instr) const {
@@ -1180,6 +1200,16 @@ void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
}
}
+ if (!UsesOutsideCycleWithDivergentExit.empty()) {
+ OS << "\nUSES OUTSIDE CYCLES WITH DIVERGENT EXIT:\n";
+
+ for (auto [Inst, UseInst, Cycle] : UsesOutsideCycleWithDivergentExit) {
+ OS << "Inst :" << Context.print(Inst)
+ << "Used by :" << Context.print(UseInst)
+ << "Outside cycle :" << Cycle->print(Context) << "\n\n";
+ }
+ }
+
for (auto &block : F) {
OS << "\nBLOCK " << Context.print(&block) << '\n';
@@ -1210,6 +1240,13 @@ void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
}
}
+template <typename ContextT>
+iterator_range<typename GenericUniformityInfo<ContextT>::UOCWDE *>
+GenericUniformityInfo<ContextT>::getUsesOutsideCycleWithDivergentExit() const {
+ return make_range(DA->UsesOutsideCycleWithDivergentExit.begin(),
+ DA->UsesOutsideCycleWithDivergentExit.end());
+}
+
template <typename ContextT>
bool GenericUniformityInfo<ContextT>::hasDivergence() const {
return DA->hasDivergence();
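A minimal consumer sketch for the new accessor (assuming a
MachineUniformityInfo *MUI, as in the lowering pass further down; the
tuple order is def, user, cycle):

  for (auto [Inst, UseInst, Cycle] :
       MUI->getUsesOutsideCycleWithDivergentExit()) {
    // Inst defines a value inside Cycle; UseInst reads it outside the
    // cycle after a divergent exit, so the value it sees is divergent.
  }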
diff --git a/llvm/include/llvm/ADT/GenericUniformityInfo.h b/llvm/include/llvm/ADT/GenericUniformityInfo.h
index e53afccc020b46..660fd6d46114d7 100644
--- a/llvm/include/llvm/ADT/GenericUniformityInfo.h
+++ b/llvm/include/llvm/ADT/GenericUniformityInfo.h
@@ -40,6 +40,10 @@ template <typename ContextT> class GenericUniformityInfo {
using CycleInfoT = GenericCycleInfo<ContextT>;
using CycleT = typename CycleInfoT::CycleT;
+ // Use outside cycle with divergent exit
+ using UOCWDE =
+ std::tuple<const InstructionT *, const InstructionT *, const CycleT *>;
+
GenericUniformityInfo(const DominatorTreeT &DT, const CycleInfoT &CI,
const TargetTransformInfo *TTI = nullptr);
GenericUniformityInfo() = default;
@@ -78,6 +82,8 @@ template <typename ContextT> class GenericUniformityInfo {
void print(raw_ostream &Out) const;
+ iterator_range<UOCWDE *> getUsesOutsideCycleWithDivergentExit() const;
+
private:
using ImplT = GenericUniformityAnalysisImpl<ContextT>;
diff --git a/llvm/lib/Analysis/UniformityAnalysis.cpp b/llvm/lib/Analysis/UniformityAnalysis.cpp
index 592de1067e191a..ad40b6294f3afb 100644
--- a/llvm/lib/Analysis/UniformityAnalysis.cpp
+++ b/llvm/lib/Analysis/UniformityAnalysis.cpp
@@ -79,13 +79,12 @@ template <>
void llvm::GenericUniformityAnalysisImpl<
SSAContext>::propagateTemporalDivergence(const Instruction &I,
const Cycle &DefCycle) {
- if (isDivergent(I))
- return;
for (auto *User : I.users()) {
auto *UserInstr = cast<Instruction>(User);
if (DefCycle.contains(UserInstr->getParent()))
continue;
markDivergent(*UserInstr);
+ recordUseOutsideCycleWithDivergentExit(&I, UserInstr, &DefCycle);
}
}
diff --git a/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp b/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
index a4b78c1c75ceb0..fb5b19968ac78a 100644
--- a/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
+++ b/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
@@ -117,12 +117,12 @@ void llvm::GenericUniformityAnalysisImpl<MachineSSAContext>::
if (!Op.getReg().isVirtual())
continue;
auto Reg = Op.getReg();
- if (isDivergent(Reg))
- continue;
- for (MachineInstr &UserInstr : RegInfo.use_instructions(Reg)) {
+ for (const MachineInstr &UserInstr : RegInfo.use_instructions(Reg)) {
if (DefCycle.contains(UserInstr.getParent()))
continue;
markDivergent(UserInstr);
+
+ recordUseOutsideCycleWithDivergentExit(&I, &UserInstr, &DefCycle);
}
}
}
@@ -193,7 +193,7 @@ INITIALIZE_PASS_END(MachineUniformityAnalysisPass, "machine-uniformity",
void MachineUniformityAnalysisPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
- AU.addRequired<MachineCycleInfoWrapperPass>();
+ AU.addRequiredTransitive<MachineCycleInfoWrapperPass>();
AU.addRequired<MachineDominatorTreeWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
index fb258547e8fb90..d8cd1e7379c93f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGlobalISelDivergenceLowering.cpp
@@ -16,6 +16,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
+#include "AMDGPUGlobalISelUtils.h"
#include "SILowerI1Copies.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -45,7 +46,6 @@ class AMDGPUGlobalISelDivergenceLowering : public MachineFunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.setPreservesCFG();
AU.addRequired<MachineDominatorTreeWrapperPass>();
AU.addRequired<MachinePostDominatorTreeWrapperPass>();
AU.addRequired<MachineUniformityAnalysisPass>();
@@ -78,6 +78,8 @@ class DivergenceLoweringHelper : public PhiLoweringHelper {
Register DstReg, Register PrevReg,
Register CurReg) override;
void constrainAsLaneMask(Incoming &In) override;
+
+ bool lowerTempDivergence();
};
DivergenceLoweringHelper::DivergenceLoweringHelper(
@@ -188,6 +190,37 @@ void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) {
In.Reg = Copy.getReg(0);
}
+void replaceUsesOfRegInInstWith(Register Reg, MachineInstr *Inst,
+ Register NewReg) {
+ for (MachineOperand &Op : Inst->operands()) {
+ if (Op.isReg() && Op.getReg() == Reg)
+ Op.setReg(NewReg);
+ }
+}
+
+bool DivergenceLoweringHelper::lowerTempDivergence() {
+ AMDGPU::IntrinsicLaneMaskAnalyzer ILMA(*MF);
+
+ for (auto [Inst, UseInst, _] : MUI->getUsesOutsideCycleWithDivergentExit()) {
+ Register Reg = Inst->getOperand(0).getReg();
+ if (MRI->getType(Reg) == LLT::scalar(1) || MUI->isDivergent(Reg) ||
+ ILMA.isS32S64LaneMask(Reg))
+ continue;
+
+ MachineInstr *MI = const_cast<MachineInstr *>(Inst);
+ MachineBasicBlock *MBB = MI->getParent();
+ B.setInsertPt(*MBB, MBB->SkipPHIsAndLabels(std::next(MI->getIterator())));
+
+ Register VgprReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
+ B.buildInstr(AMDGPU::COPY, {VgprReg}, {Reg})
+ .addUse(ExecReg, RegState::Implicit);
+
+ replaceUsesOfRegInInstWith(Reg, const_cast<MachineInstr *>(UseInst),
+ VgprReg);
+ }
+ return false;
+}
+
} // End anonymous namespace.
INITIALIZE_PASS_BEGIN(AMDGPUGlobalISelDivergenceLowering, DEBUG_TYPE,
@@ -218,5 +251,15 @@ bool AMDGPUGlobalISelDivergenceLowering::runOnMachineFunction(
DivergenceLoweringHelper Helper(&MF, &DT, &PDT, &MUI);
- return Helper.lowerPhis();
+ bool Changed = false;
+ // Temporal divergence lowering needs to inspect list of instructions used
+ // outside cycle with divergent exit provided by uniformity analysis. Uniform
+ // instructions from the list require lowering, no instruction is deleted.
+ // Thus it needs to be run before lowerPhis that deletes phis that require
+ // lowering and replaces them with new instructions.
+
+ // Non-i1 temporal divergence lowering.
+ Changed |= Helper.lowerTempDivergence();
+ Changed |= Helper.lowerPhis();
+ return Changed;
}
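Taken on its own, the rewrite is just this builder sequence (a sketch
assuming B, MRI, Reg, UseInst and ExecReg are set up as in
lowerTempDivergence above):

  // Pin the uniform value to a vgpr at its def point inside the cycle.
  // The implicit use of exec ties the copy to the current exec mask, so
  // other passes cannot hoist or sink it out of the cycle.
  Register VgprReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
  B.buildInstr(AMDGPU::COPY, {VgprReg}, {Reg})
      .addUse(ExecReg, RegState::Implicit);
  // Redirect only the use outside the cycle to the pinned vgpr value.
  replaceUsesOfRegInInstWith(Reg, UseInst, VgprReg);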
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp
index abd7dcecc93ad0..452d75498545ce 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankSelect.cpp
@@ -83,6 +83,7 @@ class RegBankSelectHelper {
MachineRegisterInfo &MRI;
AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA;
const MachineUniformityInfo &MUI;
+ const SIRegisterInfo &TRI;
const RegisterBank *SgprRB;
const RegisterBank *VgprRB;
const RegisterBank *VccRB;
@@ -91,14 +92,29 @@ class RegBankSelectHelper {
RegBankSelectHelper(MachineIRBuilder &B,
AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA,
const MachineUniformityInfo &MUI,
- const RegisterBankInfo &RBI)
- : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI),
+ const SIRegisterInfo &TRI, const RegisterBankInfo &RBI)
+ : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI), TRI(TRI),
SgprRB(&RBI.getRegBank(AMDGPU::SGPRRegBankID)),
VgprRB(&RBI.getRegBank(AMDGPU::VGPRRegBankID)),
VccRB(&RBI.getRegBank(AMDGPU::VCCRegBankID)) {}
+ // Temporal divergence copy: COPY to vgpr with implicit use of $exec inside of
+ // the cycle
+ // Note: uniformity analysis does not consider that registers with vgpr def
+ // are divergent (you can have uniform value in vgpr).
+ // - TODO: implicit use of $exec could be implemented as indicator that
+ // instruction is divergent
+ bool isTemporalDivergenceCopy(Register Reg) {
+ MachineInstr *MI = MRI.getVRegDef(Reg);
+ if (!MI->isCopy() || MI->getNumImplicitOperands() != 1)
+ return false;
+
+ return MI->implicit_operands().begin()->getReg() == TRI.getExec();
+ }
+
const RegisterBank *getRegBankToAssign(Register Reg) {
- if (MUI.isUniform(Reg) || ILMA.isS32S64LaneMask(Reg))
+ if (!isTemporalDivergenceCopy(Reg) &&
+ (MUI.isUniform(Reg) || ILMA.isS32S64LaneMask(Reg)))
return SgprRB;
if (MRI.getType(Reg) == LLT::scalar(1))
return VccRB;
@@ -209,7 +225,7 @@ bool AMDGPURegBankSelect::runOnMachineFunction(MachineFunction &MF) {
getAnalysis<MachineUniformityAnalysisPass>().getUniformityInfo();
MachineRegisterInfo &MRI = *B.getMRI();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegBankInfo());
+ RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegisterInfo(), *ST.getRegBankInfo());
// Virtual registers at this point don't have register banks.
// Virtual registers in def and use operands of already inst-selected
// instruction have register class.
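The recognizer above condenses to a two-part check; an equivalent sketch
(MI and TRI as in the helper):

  // A temporal-divergence copy is a plain COPY whose single implicit
  // operand is a use of exec. Its def must be assigned to a vgpr even
  // though uniformity analysis reports the value itself as uniform.
  bool IsTempDivCopy = MI->isCopy() && MI->getNumImplicitOperands() == 1 &&
                       MI->implicit_operands().begin()->getReg() ==
                           TRI.getExec();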
diff --git a/llvm/lib/Target/AMDGPU/SILowerI1Copies.h b/llvm/lib/Target/AMDGPU/SILowerI1Copies.h
index a407e3e014edd7..fd90328c2b9269 100644
--- a/llvm/lib/Target/AMDGPU/SILowerI1Copies.h
+++ b/llvm/lib/Target/AMDGPU/SILowerI1Copies.h
@@ -15,6 +15,7 @@
#include "GCNSubtarget.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePostDominators.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
namespace llvm {
@@ -72,6 +73,11 @@ class PhiLoweringHelper {
LaneMaskRegAttrs = MRI->getVRegAttrs(LaneMask);
}
+ void
+ initializeLaneMaskRegisterAttributes(MachineRegisterInfo::VRegAttrs Attrs) {
+ LaneMaskRegAttrs = Attrs;
+ }
+
bool isLaneMaskReg(Register Reg) const {
return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
index 6ce2f9b7a2c77c..f244639ed97623 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.mir
@@ -471,7 +471,7 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %56(s1), %bb.4
+ ; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %57(s1), %bb.4
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %29(s32), %bb.4, [[DEF]](s32), %bb.0
; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_BRCOND [[COPY4]](s1), %bb.5
@@ -486,6 +486,7 @@ body: |
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:_(s32) = G_AMDGPU_BUFFER_LOAD [[UV]](<4 x s32>), [[C7]](s32), [[PHI2]], [[C7]], 0, 0, 0 :: (dereferenceable load (s32), align 1, addrspace 8)
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AMDGPU_BUFFER_LOAD1]], [[PHI4]]
+ ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32), implicit $exec_lo
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C8]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
@@ -497,11 +498,11 @@ body: |
; GFX10-NEXT: bb.4:
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ADD]](s32), [[AMDGPU_BUFFER_LOAD]]
+ ; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY5]](s32), [[AMDGPU_BUFFER_LOAD]]
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP2]]
; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s1)
; GFX10-NEXT: [[C10:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
index 2299191d88b766..87e4f4b666d57e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.mir
@@ -544,6 +544,7 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %11(s32), %bb.6, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %13(s32), %bb.6
+ ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PHI1]](s32), implicit $exec_lo
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
@@ -567,8 +568,8 @@ body: |
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[PHI1]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
- ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
- ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[COPY6]](s1)
+ ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
+ ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -578,19 +579,19 @@ body: |
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C3]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
- ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
- ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
+ ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
+ ; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
+ ; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: successors: %bb.7(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
+ ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY7]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.5, [[DEF]](s32), %bb.4
- ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
+ ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
- ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[COPY9]](s1), [[PHI]](s32)
+ ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI]](s32)
; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
@@ -604,7 +605,7 @@ body: |
; GFX10-NEXT: bb.8:
; GFX10-NEXT: successors: %bb.9(0x80000000)
; GFX10-NEXT: {{ $}}
- ; GFX10-NEXT: G_STORE [[PHI1]](s32), [[MV1]](p1) :: (store (s32), addrspace 1)
+ ; GFX10-NEXT: G_STORE [[COPY6]](s32), [[MV1]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.9:
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
index 6f1797455c6a83..1de0b52f36a48a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll
@@ -5,20 +5,20 @@ define void @temporal_divergent_i32(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i32:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_mov_b32 s4, -1
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: s_mov_b32 s5, -1
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB0_1: ; %loop
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: s_add_i32 s4, s4, 1
-; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s4
+; GFX10-NEXT: s_add_i32 s5, s5, 1
+; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s5
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: v_mov_b32_e32 v3, s5
+; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB0_1
; GFX10-NEXT: ; %bb.2: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
-; GFX10-NEXT: v_mov_b32_e32 v0, s4
-; GFX10-NEXT: flat_store_dword v[1:2], v0
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX10-NEXT: flat_store_dword v[1:2], v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir
index 996815e2d38fc3..975b55ce5375c3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir
@@ -25,6 +25,7 @@ body: |
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %9(s32), %bb.1
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C2]]
+ ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD]](s32), implicit $exec_lo
; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[ADD]](s32)
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP]](s1), [[PHI]](s32)
@@ -33,7 +34,7 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32)
- ; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV]](p0) :: (store (s32))
+ ; GFX10-NEXT: G_STORE [[COPY3]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
successors: %bb.1(0x80000000)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
index a02e623efb3a32..aca00b9717a9fc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
@@ -450,19 +450,20 @@ define amdgpu_ps void @divergent_because_of_temporal_divergent_use(float %val, p
;
; NEW_RBS-LABEL: divergent_because_of_temporal_divergent_use:
; NEW_RBS: ; %bb.0: ; %entry
-; NEW_RBS-NEXT: s_mov_b32 s0, -1
-; NEW_RBS-NEXT: s_mov_b32 s1, 0
+; NEW_RBS-NEXT: s_mov_b32 s1, -1
+; NEW_RBS-NEXT: s_mov_b32 s0, 0
; NEW_RBS-NEXT: .LBB15_1: ; %loop
; NEW_RBS-NEXT: ; =>This Inner Loop Header: Depth=1
-; NEW_RBS-NEXT: s_add_i32 s0, s0, 1
-; NEW_RBS-NEXT: v_cvt_f32_u32_e32 v3, s0
+; NEW_RBS-NEXT: s_add_i32 s1, s1, 1
+; NEW_RBS-NEXT: v_cvt_f32_u32_e32 v3, s1
; NEW_RBS-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0
-; NEW_RBS-NEXT: s_or_b32 s1, vcc_lo, s1
-; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
+; NEW_RBS-NEXT: v_mov_b32_e32 v3, s1
+; NEW_RBS-NEXT: s_or_b32 s0, vcc_lo, s0
+; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; NEW_RBS-NEXT: s_cbranch_execnz .LBB15_1
; NEW_RBS-NEXT: ; %bb.2: ; %exit
-; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s1
-; NEW_RBS-NEXT: v_mul_lo_u32 v0, s0, 10
+; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; NEW_RBS-NEXT: v_mul_lo_u32 v0, v3, 10
; NEW_RBS-NEXT: global_store_dword v[1:2], v0, off
; NEW_RBS-NEXT: s_endpgm
entry: