[llvm] SSA regalloc integration draft (PR #156049)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 29 09:00:57 PDT 2025
https://github.com/alex-t created https://github.com/llvm/llvm-project/pull/156049
SSA Register Allocation — Draft PR
📌 Problem Statement
This PR presents an end-to-end prototype of SSA-based register allocation for the AMDGPU backend.
The stack is split into several independent components (see below); this draft PR provides a single integration point for discussion, design review, and documentation.
🧩 Components
Next-Use Analysis
Tracks future-use distances of virtual registers and subregisters; a simplified sketch follows this component list.
➡️ [PR link]
SSA Spiller
Performs spilling on SSA form using Next-Use information.
➡️ [PR link]
SSA Rebuilder
Reconstructs SSA after spill/reload insertion.
➡️ [PR link]
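For discussion purposes, the core bookkeeping can be boiled down to a short sketch: each block keeps a map from virtual register to the distance (in instructions) of its next use, successor maps are merged by taking the per-register minimum, and edges that leave a loop are weighted so uses beyond the loop never look closer than uses inside it. This is a simplified illustration under assumed standalone types, not the patch's code; the helper name and the plain std::map below are illustrative only.

// Minimal sketch of the next-use bookkeeping, assuming standalone types
// rather than the patch's LLVM data structures (NextUseResult uses DenseMap
// and SlotIndexes). The loop-exit weighting mirrors the EdgeWeigths handling
// in NextUseResult::analyze.
#include <algorithm>
#include <climits>
#include <map>

using Reg = unsigned;
using Distances = std::map<Reg, unsigned>;  // vreg -> instructions until next use
constexpr unsigned Infinity = UINT_MAX / 2; // headroom so additions cannot wrap

// Merge a successor's distances into the current block's map, taking the
// per-register minimum. 'Weight' models an edge leaving a loop: uses reached
// only through such an edge are pushed towards Infinity so they always look
// farther away than uses inside the loop.
void mergeDistances(Distances &Cur, const Distances &Succ, unsigned Weight) {
  for (const auto &[R, D] : Succ) {
    unsigned Penalized = std::min(Infinity, D + Weight);
    auto It = Cur.find(R);
    if (It == Cur.end())
      Cur.emplace(R, Penalized);
    else
      It->second = std::min(It->second, Penalized);
  }
}

In the patch itself the per-block maps are then shifted by the block length and iterated to a fixed point over a post-order walk of the function.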
🔗 Related PRs / Branches
next-use → [link]
spiller → [link]
ssa-rebuilder → [link]
Integration branch: ssa-stack-wip
🧪 Testing
Minimal MIR tests are included for each component.
Attribute-driven pressure tests (amdgpu-num-vgprs) are used to trigger spilling; the sketch after this list shows the eviction step they exercise.
End-to-end integration tests are kept in ssa-stack-wip.
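When the available register budget is lowered, the spiller's limit step sorts the active virtual registers by next-use distance and spills the ones whose next use is farthest away (Belady's rule). A minimal sketch, assuming a standalone NextUseDistance callback that stands in for the real Next-Use Analysis query:

// Minimal sketch of the eviction step (simplified from AMDGPUSSASpiller::limit);
// 'NextUseDistance' is a hypothetical stand-in for the Next-Use Analysis query.
#include <algorithm>
#include <functional>
#include <vector>

using Reg = unsigned;

// Keep at most 'Budget' registers in the active set; return the registers to
// spill, choosing those whose next use is farthest away.
std::vector<Reg> selectSpills(std::vector<Reg> Active, unsigned Budget,
                              const std::function<unsigned(Reg)> &NextUseDistance) {
  if (Active.size() <= Budget)
    return {};
  // Nearest next use first; the tail of the vector is the best spill material.
  std::sort(Active.begin(), Active.end(),
            [&](Reg A, Reg B) { return NextUseDistance(A) < NextUseDistance(B); });
  return {Active.begin() + Budget, Active.end()};
}

As in the patch's limit(), registers already in the spill set are not re-spilled, and registers that are dead at the insertion point are simply dropped from the active set.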
🚧 Current Limitations / TODO
PHI handling in the SSA Rebuilder requires further validation.
The spiller does not yet hoist reloads.
Integration with SILowerControlFlow is under investigation.
CI failures are expected due to missing test coverage.
📖 Notes
This Draft PR is not intended for upstream landing as-is.
It serves as a design reference and discussion hub.
Final upstream submission will be split into independent, reviewable PRs.
From e529f3eccff153e2b14accc23db3e16ae2de94f7 Mon Sep 17 00:00:00 2001
From: Alexander <alexander.timofeev at amd.com>
Date: Tue, 26 Nov 2024 13:57:47 +0100
Subject: [PATCH 01/46] SSA Spiller WIP 26.11.24
---
llvm/lib/Target/AMDGPU/AMDGPU.h | 16 +
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 189 +++++++
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 197 +++++++
llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 533 ++++++++++++++++++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.h | 25 +
.../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 1 +
llvm/lib/Target/AMDGPU/CMakeLists.txt | 2 +
8 files changed, 964 insertions(+), 1 deletion(-)
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.h
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index ebe38de1636be..32797cb0393c6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -39,6 +39,8 @@ FunctionPass *createSIAnnotateControlFlowLegacyPass();
FunctionPass *createSIFoldOperandsLegacyPass();
FunctionPass *createSIPeepholeSDWALegacyPass();
FunctionPass *createSILowerI1CopiesLegacyPass();
+FunctionPass *createAMDGPUSSASpillerLegacyPass();
+FunctionPass *createAMDGPUGlobalISelDivergenceLoweringPass();
FunctionPass *createSIShrinkInstructionsLegacyPass();
FunctionPass *createSILoadStoreOptimizerLegacyPass();
FunctionPass *createSIWholeQuadModeLegacyPass();
@@ -93,6 +95,14 @@ class SILowerI1CopiesPass : public PassInfoMixin<SILowerI1CopiesPass> {
MachineFunctionAnalysisManager &MFAM);
};
+class AMDGPUSSASpillerPass : public PassInfoMixin<AMDGPUSSASpillerPass> {
+public:
+ AMDGPUSSASpillerPass() = default;
+
+ PreservedAnalyses run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM);
+};
+
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &);
void initializeAMDGPUAlwaysInlinePass(PassRegistry&);
@@ -197,6 +207,12 @@ extern char &SILowerWWMCopiesLegacyID;
void initializeSILowerI1CopiesLegacyPass(PassRegistry &);
extern char &SILowerI1CopiesLegacyID;
+void initializeAMDGPUSSASpillerLegacyPass(PassRegistry &);
+extern char &AMDGPUSSASpillerLegacyID;
+
+void initializeAMDGPUNextUseAnalysisWrapperPass(PassRegistry&);
+extern char &AMDGPUNextUseAnalysisID;
+
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &);
extern char &AMDGPUGlobalISelDivergenceLoweringID;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
new file mode 100644
index 0000000000000..25dcd52a413e4
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -0,0 +1,189 @@
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionAnalysis.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Passes/PassBuilder.h"
+
+#include "AMDGPU.h"
+
+#include "AMDGPUNextUseAnalysis.h"
+
+#define DEBUG_TYPE "amdgpu-next-use"
+
+using namespace llvm;
+
+//namespace {
+
+
+void NextUseResult::init(const MachineFunction &MF) {
+
+ for (auto L : LI->getLoopsInPreorder()) {
+ SmallVector<MachineBasicBlock *> Exiting;
+ L->getExitingBlocks(Exiting);
+ for (auto B : Exiting) {
+ for (auto S : successors(B)) {
+ if (!L->contains(S)) {
+ EdgeWeigths[B->getNumber()] = S->getNumber();
+ }
+ }
+ }
+ }
+}
+
+void NextUseResult::analyze(const MachineFunction &MF) {
+ bool Changed = true;
+ while(Changed) {
+ Changed = false;
+ for (auto MBB : post_order(&MF)) {
+ SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
+ VRegDistances Curr, Prev;
+ if (auto CurrMapRef = getVRegMap(MBB)) {
+ Prev = CurrMapRef.value();
+ }
+
+
+ for (auto Succ : successors(MBB)) {
+ auto SuccMapRef = getVRegMap(Succ);
+
+ if (SuccMapRef) {
+ // Check if the edge from MBB to Succ goes out of the Loop
+ unsigned Weight = 0;
+ if (EdgeWeigths.contains(MBB->getNumber())) {
+ int SuccNum = EdgeWeigths[MBB->getNumber()];
+ if (Succ->getNumber() == SuccNum)
+ Weight = Infinity;
+ }
+ mergeDistances(Curr, SuccMapRef.value(), Weight);
+ }
+ }
+ unsigned MBBLen =
+ Begin.distance(Indexes->getMBBEndIdx(MBB)) / SlotIndex::InstrDist;
+ for (auto &P : Curr) {
+ P.second += MBBLen;
+ }
+
+ NextUseMap[MBB->getNumber()] = std::move(Curr);
+
+ for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
+ for (auto &MO : MI.operands()) {
+ if (MO.isReg() && MO.getReg().isVirtual() && MO.isUse()) {
+ Register VReg = MO.getReg();
+ MachineInstr *Def = MRI->getVRegDef(VReg);
+ if (Def->getParent() == MBB)
+ // defined in block - skip it
+ continue;
+ unsigned Distance =
+ Begin.distance(Indexes->getInstructionIndex(MI)) /
+ SlotIndex::InstrDist;
+ setNextUseDistance(MBB, VReg, Distance);
+ UsedInBlock[MBB->getNumber()].insert(VReg);
+ }
+ }
+ }
+ VRegDistances &Next = NextUseMap[MBB->getNumber()];
+ dbgs() << "MBB_" << MBB->getNumber() << "\n";
+ printVregDistancesD(Next);
+ bool Changed4MBB = diff(Prev, Next);
+
+ Changed |= Changed4MBB;
+ }
+ }
+}
+
+unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI, const Register Vreg) {
+ unsigned Dist = Infinity;
+ const MachineBasicBlock *MBB = MI.getParent();
+ SlotIndex Begin = Indexes->getMBBStartIdx(MBB->getNumber());
+ SlotIndex Idx = Indexes->getInstructionIndex(MI);
+ int IDist = Begin.distance(Idx)/SlotIndex::InstrDist;
+ if (auto VMapRef = getVRegMap(MBB)) {
+ VRegDistances &VRegs = VMapRef.value();
+ if (VRegs.contains(Vreg)) {
+ int UseDist = VRegs[Vreg];
+ if ((UseDist - IDist) < 0) {
+ for (auto Succ : successors(MBB)) {
+ if (auto SuccVMapRef = getVRegMap(Succ)) {
+ VRegDistances &SuccVRegs = SuccVMapRef.value();
+ if (SuccVRegs.contains(Vreg)) {
+ Dist = std::min(Dist, SuccVRegs[Vreg]);
+ }
+ }
+ }
+ } else {
+ Dist = UseDist - IDist;
+ }
+ return Dist;
+ }
+ }
+ return Infinity;
+}
+
+AMDGPUNextUseAnalysis::Result
+AMDGPUNextUseAnalysis::run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM) {
+ return AMDGPUNextUseAnalysis::Result(MF,
+ MFAM.getResult<SlotIndexesAnalysis>(MF),
+ MFAM.getResult<MachineLoopAnalysis>(MF));
+}
+
+AnalysisKey AMDGPUNextUseAnalysis::Key;
+
+//} // namespace
+
+extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo
+llvmGetPassPluginInfo() {
+ return {LLVM_PLUGIN_API_VERSION, "AMDGPUNextUseAnalysisPass",
+ LLVM_VERSION_STRING, [](PassBuilder &PB) {
+ PB.registerAnalysisRegistrationCallback(
+ [](MachineFunctionAnalysisManager &MFAM) {
+ MFAM.registerPass([] { return AMDGPUNextUseAnalysis(); });
+ });
+ }};
+}
+
+char AMDGPUNextUseAnalysisWrapper::ID = 0;
+char &llvm::AMDGPUNextUseAnalysisID = AMDGPUNextUseAnalysisWrapper::ID;
+INITIALIZE_PASS_BEGIN(AMDGPUNextUseAnalysisWrapper, "amdgpu-next-use",
+ "AMDGPU Next Use Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
+INITIALIZE_PASS_END(AMDGPUNextUseAnalysisWrapper, "amdgpu-next-use",
+ "AMDGPU Next Use Analysis", false, false)
+
+bool AMDGPUNextUseAnalysisWrapper::runOnMachineFunction(
+ MachineFunction &MF) {
+ NU.Indexes = &getAnalysis<SlotIndexesWrapperPass>().getSI();
+ NU.LI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
+ NU.MRI = &MF.getRegInfo();
+ NU.init(MF);
+ NU.analyze(MF);
+ LLVM_DEBUG(NU.dump());
+ return false;
+}
+
+void AMDGPUNextUseAnalysisWrapper::getAnalysisUsage(
+ AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequiredTransitiveID(MachineLoopInfoID);
+ AU.addPreservedID(MachineLoopInfoID);
+ AU.addRequiredTransitiveID(MachineDominatorsID);
+ AU.addPreservedID(MachineDominatorsID);
+ AU.addPreserved<SlotIndexesWrapperPass>();
+ AU.addRequiredTransitive<SlotIndexesWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+AMDGPUNextUseAnalysisWrapper::AMDGPUNextUseAnalysisWrapper()
+ : MachineFunctionPass(ID) {
+ initializeAMDGPUNextUseAnalysisWrapperPass(*PassRegistry::getPassRegistry());
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
new file mode 100644
index 0000000000000..61df809c1e3b8
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -0,0 +1,197 @@
+//===- AMDGPUNextUseAnalysis.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_NEXT_USE_ANALYSIS_H
+#define LLVM_LIB_TARGET_AMDGPU_NEXT_USE_ANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+#include <limits>
+
+using namespace llvm;
+
+// namespace {
+
+class NextUseResult {
+ friend class AMDGPUNextUseAnalysisWrapper;
+ SlotIndexes *Indexes;
+ const MachineRegisterInfo *MRI;
+ MachineLoopInfo *LI;
+
+public:
+ using VRegDistances = DenseMap<Register, unsigned>;
+
+private:
+ DenseMap<unsigned, VRegDistances> NextUseMap;
+ DenseMap<unsigned, SetVector<Register>> UsedInBlock;
+ DenseMap<int, int> EdgeWeigths;
+ const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
+ void init(const MachineFunction &MF);
+ void analyze(const MachineFunction &MF);
+ bool diff(const VRegDistances &LHS, const VRegDistances &RHS) {
+ for (auto P : LHS) {
+ if (!RHS.contains(P.getFirst()) ||
+ RHS.lookup(P.getFirst()) != P.getSecond())
+ return true;
+ }
+ for (auto P : RHS) {
+ if (!LHS.contains(P.getFirst()))
+ return true;
+ }
+ return false;
+ }
+
+ void printVregDistances(const VRegDistances &D,
+ raw_ostream &O = dbgs()) const {
+ O << "\n";
+ for (auto P : D) {
+ O << "Vreg: " << printReg(P.first) << "[ " << P.second << "]\n";
+ }
+ }
+
+ void printVregDistancesD(const VRegDistances &D) const {
+ dbgs() << "\n";
+ for (auto P : D) {
+ dbgs() << "Vreg: " << printReg(P.first) << "[ " << P.second << "]\n";
+ }
+ }
+
+ void dump(raw_ostream &O = dbgs()) const {
+ for (auto P : NextUseMap) {
+ O << "\nMBB_" << P.first << "\n";
+ printVregDistances(P.second, O);
+ }
+ }
+
+ std::optional<std::reference_wrapper<VRegDistances>>
+ getVRegMap(const MachineBasicBlock *MBB) {
+ if (NextUseMap.contains(MBB->getNumber())) {
+ return NextUseMap[MBB->getNumber()];
+ }
+ return std::nullopt;
+ }
+
+ VRegDistances &mergeDistances(VRegDistances &LHS, const VRegDistances &RHS,
+ unsigned Weight = 0) {
+ for (auto Pair : LHS) {
+ Register VReg = Pair.getFirst();
+ if (RHS.contains(VReg)) {
+ LHS[VReg] = std::min(Pair.getSecond(), RHS.lookup(VReg) + Weight);
+ }
+ }
+ for (auto Pair : RHS) {
+ if (LHS.contains(Pair.getFirst()))
+ continue;
+ LHS[Pair.getFirst()] = Pair.getSecond() + Weight;
+ }
+ return LHS;
+ }
+
+ void setNextUseDistance(const MachineBasicBlock *MBB, Register VReg,
+ int Distance) {
+ auto VMapRef = getVRegMap(MBB);
+ if (!VMapRef)
+ VMapRef = NextUseMap[MBB->getNumber()];
+ VRegDistances &VRegs = VMapRef.value();
+ VRegs[VReg] = Distance;
+ }
+
+ void clear() {
+ NextUseMap.clear();
+ EdgeWeigths.clear();
+ }
+
+public:
+ NextUseResult() = default;
+ NextUseResult(const MachineFunction &MF, SlotIndexes &SI, MachineLoopInfo &LI)
+ : Indexes(&SI), MRI(&MF.getRegInfo()), LI(&LI) {
+ init(MF);
+ analyze(MF);
+ }
+ ~NextUseResult() { clear(); }
+
+ void print(raw_ostream &O) const { dump(O); }
+
+ unsigned getNextUseDistance(const MachineInstr &MI, Register Vreg);
+
+ bool isDead(MachineBasicBlock::iterator Pos, Register R) {
+ if (!R.isVirtual())
+ report_fatal_error("Only virtual registers allowed!\n", true);
+ return getNextUseDistance(*Pos, R) == Infinity;
+ }
+
+ void getSortedForInstruction(const MachineInstr &MI,
+ SetVector<Register> &Regs) {
+ auto SortByDist = [&](const Register LHS, const Register RHS) {
+ return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
+ };
+ SmallVector<Register> Tmp(Regs.takeVector());
+ sort(Tmp, SortByDist);
+ Regs.insert(Tmp.begin(), Tmp.end());
+ }
+
+ std::vector<std::pair<Register, unsigned>> getSortedByDistance(
+ const MachineInstr &MI, std::vector<Register> &W) {
+ std::vector<std::pair<Register, unsigned>> Result;
+ auto compareByVal = [](std::pair<Register, unsigned> &LHS,
+ std::pair<Register, unsigned> &RHS) -> bool {
+ return LHS.second < RHS.second;
+ };
+
+ for (auto R : W) {
+ dbgs() << printReg(R);
+ Result.push_back(std::make_pair(R, getNextUseDistance(MI, R)));
+ }
+
+ std::sort(Result.begin(), Result.end(), compareByVal);
+
+ return std::move(Result);
+ }
+
+ SetVector<Register> usedInBlock(MachineBasicBlock &MBB) {
+ return std::move(UsedInBlock[MBB.getNumber()]);
+ }
+};
+
+class AMDGPUNextUseAnalysis : public AnalysisInfoMixin<AMDGPUNextUseAnalysis> {
+ friend AnalysisInfoMixin<AMDGPUNextUseAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ using Result = NextUseResult;
+ Result run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM);
+};
+
+class AMDGPUNextUseAnalysisWrapper : public MachineFunctionPass {
+ NextUseResult NU;
+
+public:
+ static char ID;
+
+ AMDGPUNextUseAnalysisWrapper();
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// Pass entry point; computes next-use distances for the function.
+ bool runOnMachineFunction(MachineFunction &) override;
+ void releaseMemory() override { NU.clear(); }
+
+ /// Implement the dump method.
+ void print(raw_ostream &O, const Module * = nullptr) const override {
+ NU.print(O);
+ }
+
+ NextUseResult &getNU() { return NU; }
+};
+
+//}
+
+#endif // LLVM_LIB_TARGET_AMDGPU_NEXT_USE_ANALYSIS_H
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 48448833721bf..783257f4b267c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -141,7 +141,7 @@ MACHINE_FUNCTION_PASS("si-post-ra-bundler", SIPostRABundlerPass())
MACHINE_FUNCTION_PASS("si-pre-allocate-wwm-regs", SIPreAllocateWWMRegsPass())
MACHINE_FUNCTION_PASS("si-pre-emit-peephole", SIPreEmitPeepholePass())
MACHINE_FUNCTION_PASS("si-shrink-instructions", SIShrinkInstructionsPass())
-MACHINE_FUNCTION_PASS("si-wqm", SIWholeQuadModePass())
+MACHINE_FUNCTION_PASS("amdgpu-ssa-spiller", AMDGPUSSASpillerPass())
#undef MACHINE_FUNCTION_PASS
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, CREATE_PASS)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
new file mode 100644
index 0000000000000..9f87d0f5df202
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -0,0 +1,533 @@
+#include "AMDGPU.h"
+#include "GCNSubtarget.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include "AMDGPUNextUseAnalysis.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-ssa-spiller"
+
+namespace {
+
+class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
+ LiveVariables &LV;
+ MachineLoopInfo &LI;
+ MachineDominatorTree &MDT;
+ AMDGPUNextUseAnalysis::Result &NU;
+ const MachineRegisterInfo *MRI;
+ const SIRegisterInfo *TRI;
+
+ using RegisterSet = SetVector<Register>;
+
+ struct SpillInfo {
+ //MachineBasicBlock *Parent;
+ RegisterSet ActiveSet;
+ RegisterSet SpillSet;
+ };
+
+ unsigned NumAvailableSGPRs;
+ unsigned NumAvailableVGPRs;
+ unsigned NumAvailableAGPRs;
+ DenseMap<unsigned, SpillInfo> RegisterMap;
+ DenseMap<unsigned, unsigned> PostponedLoopLatches;
+ DenseMap<unsigned, SmallVector<unsigned>> LoopHeader2Latches;
+
+ void init(const MachineFunction &MF) {
+ const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+ MRI = &MF.getRegInfo();
+ TRI = ST.getRegisterInfo();
+ NumAvailableVGPRs = ST.getTotalNumVGPRs();
+ NumAvailableSGPRs = ST.getTotalNumSGPRs();
+
+ // FIXME: what is the real number of available AGPRs?
+
+ NumAvailableAGPRs = NumAvailableVGPRs;
+ }
+
+ SpillInfo &getBlockInfo(const MachineBasicBlock &MBB);
+
+ void processFunction(MachineFunction &MF);
+ void processBlock(MachineBasicBlock &MBB);
+ void processLoop(MachineLoop *L);
+ void connectToPredecessors(MachineBasicBlock &MBB, bool IgnoreLoops = false);
+ void initActiveSetUsualBlock(MachineBasicBlock &MBB);
+ void initActiveSetLoopHeader(MachineBasicBlock &MBB);
+
+ void reloadAtEnd(MachineBasicBlock &MBB, Register VReg);
+ void spillAtEnd(MachineBasicBlock &MBB, Register VReg);
+ void reloadBefore(Register, MachineBasicBlock::iterator InsertBefore);
+ void spillBefore(Register, MachineBasicBlock::iterator InsertBefore);
+
+ SmallVector<unsigned> getLoopMaxRP(MachineLoop *L);
+ void limit(RegisterSet &Active, RegisterSet &Spilled,
+ MachineBasicBlock::iterator I,
+ const RegisterSet Defs = RegisterSet());
+ void limit(RegisterSet &Active, RegisterSet &Spilled,
+ MachineBasicBlock::iterator LimitPoint,
+ MachineBasicBlock::iterator InsertionPoint,
+ RegisterSet RegClassSubset, unsigned Limit);
+ void splitByRegPressureSet(const RegisterSet In, RegisterSet &SGPRS,
+ unsigned &SGPRRP, RegisterSet &VGPRS,
+ unsigned &VGPRRP, RegisterSet &AGPRS,
+ unsigned &AGPRRP);
+ void formActiveSet(const MachineBasicBlock &MBB, const RegisterSet Take,
+ const RegisterSet Cand, MachineLoop *L = nullptr);
+
+public:
+ AMDGPUSSASpiller() = default;
+
+ AMDGPUSSASpiller(LiveVariables &LV, MachineLoopInfo &LI,
+ MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
+ : LV(LV), LI(LI), MDT(MDT), NU(NU) {}
+ bool run(MachineFunction &MF);
+};
+
+AMDGPUSSASpiller::SpillInfo &
+AMDGPUSSASpiller::getBlockInfo(const MachineBasicBlock &MBB) {
+ if (!RegisterMap.contains(MBB.getNumber()))
+ llvm::report_fatal_error("Incorrect MF walk order");
+ return RegisterMap[MBB.getNumber()];
+}
+
+void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
+ ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
+ for (auto MBB : RPOT) {
+ if (LI.isLoopHeader(MBB)) {
+ initActiveSetLoopHeader(*MBB);
+ } else {
+ initActiveSetUsualBlock(*MBB);
+ }
+ connectToPredecessors(*MBB);
+ processBlock(*MBB);
+ // We process loop blocks twice: once with Spill/Active sets of
+ // loop latch blocks unknown, and then again as soon as the latch blocks
+ // sets are computed.
+ if (PostponedLoopLatches.contains(MBB->getNumber())) {
+ SmallVector<unsigned> &Latches =
+ LoopHeader2Latches[PostponedLoopLatches[MBB->getNumber()]];
+ remove_if(Latches, [MBB](int Num) { return Num == MBB->getNumber(); });
+ if (Latches.empty()) {
+ processLoop(LI.getLoopFor(MBB));
+ }
+ PostponedLoopLatches.erase(MBB->getNumber());
+ }
+ }
+}
+
+void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
+ auto &Entry = getBlockInfo(MBB);
+ RegisterSet &Active = Entry.ActiveSet;
+ RegisterSet &Spilled = Entry.SpillSet;
+ RegisterSet Reloads;
+ for (auto &I : MBB) {
+ for (auto U : I.uses()) {
+ if (!U.isReg())
+ continue;
+ if (U.getReg().isPhysical())
+ continue;
+ Register VReg = U.getReg();
+ if (Active.insert(VReg)) {
+ // Not in reg, hence, should have been spilled before
+ // TODO: This is ODD as the Spilled set is a union among all
+ // predecessors and should already contain all spilled before!
+ // Spilled.insert(U.getReg());
+ Reloads.insert(VReg);
+ }
+ }
+ RegisterSet Defs;
+ for (auto D : I.defs()) {
+ if (D.getReg().isVirtual())
+ Defs.insert(D.getReg());
+ }
+ limit(Active, Spilled, I);
+ limit(Active, Spilled, std::next(&I), Defs);
+ // FIXME: limit with Defs is assumed to create room for the registers being
+ // defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
+ Active.insert(Defs.begin(), Defs.end());
+ // Add reloads for VRegs in Reloads before I
+ for (auto R : Reloads)
+ reloadBefore(R, I);
+ }
+}
+
+void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
+ for (auto MBB : L->getBlocks()) {
+ connectToPredecessors(*MBB, true);
+ processBlock(*MBB);
+ }
+}
+
+void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
+ bool IgnoreLoops) {
+
+ SmallVector<MachineBasicBlock *> Preds(predecessors(&MBB));
+
+ // in RPOT loop latches have not been processed yet
+ // their Active and Spill sets are not yet known
+ // Exclude from processing and postpone.
+ if (!IgnoreLoops && LI.isLoopHeader(&MBB)) {
+ MachineLoop *L = LI.getLoopFor(&MBB);
+ SmallVector<MachineBasicBlock *> Latches;
+ L->getLoopLatches(Latches);
+ for (auto LL : Latches) {
+ remove_if(Preds, [LL](MachineBasicBlock *BB) {
+ return LL->getNumber() == BB->getNumber();
+ });
+ LoopHeader2Latches[MBB.getNumber()].push_back(LL->getNumber());
+ PostponedLoopLatches[LL->getNumber()] = MBB.getNumber();
+ }
+ }
+
+ SpillInfo &Cur = getBlockInfo(MBB);
+ for (auto Pred : Preds) {
+ Cur.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
+ }
+ set_intersect(Cur.SpillSet, Cur.ActiveSet);
+ for (auto Pred : Preds) {
+ for (auto R : set_difference(Cur.ActiveSet, getBlockInfo(*Pred).ActiveSet))
+ reloadAtEnd(*Pred, R);
+
+ for (auto S : set_intersection(
+ set_difference(Cur.SpillSet, getBlockInfo(*Pred).SpillSet),
+ getBlockInfo(*Pred).ActiveSet))
+ spillAtEnd(*Pred, S);
+ }
+}
+
+void AMDGPUSSASpiller::initActiveSetUsualBlock(MachineBasicBlock &MBB) {
+
+ if (predecessors(&MBB).empty())
+ return;
+
+ auto Pred = MBB.pred_begin();
+
+ RegisterSet Take = getBlockInfo(**Pred).ActiveSet;
+ RegisterSet Cand = getBlockInfo(**Pred).ActiveSet;
+
+ for (std::next(Pred); Pred != MBB.pred_end(); ++Pred) {
+ set_intersect(Take, getBlockInfo(**Pred).ActiveSet);
+ Cand.set_union(getBlockInfo(**Pred).ActiveSet);
+ }
+ Cand.set_subtract(Take);
+
+ formActiveSet(MBB, Take, Cand);
+}
+
+void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
+ auto &Entry = getBlockInfo(MBB);
+ RegisterSet LiveIn;
+
+ for (unsigned i = 0; i < MRI->getNumVirtRegs(); i++) {
+ Register VReg = Register::index2VirtReg(i);
+ if (LV.isLiveIn(VReg, MBB))
+ LiveIn.insert(VReg);
+ }
+
+ for (auto &PHI : MBB.phis()) {
+ for (auto U : PHI.uses()) {
+ if (U.isReg()) {
+ // assume PHIs operands are always virtual regs
+ LiveIn.insert(U.getReg());
+ }
+ }
+ }
+
+ RegisterSet UsedInLoop;
+ MachineLoop *L = LI.getLoopFor(&MBB);
+ for (auto B : L->blocks()) {
+ RegisterSet Tmp(NU.usedInBlock(*B));
+ UsedInLoop.set_union(Tmp);
+ }
+
+ // Take - LiveIns used in Loop. Cand - LiveThrough
+ RegisterSet Take = set_intersection(LiveIn, UsedInLoop);
+ RegisterSet Cand = set_difference(LiveIn, UsedInLoop);
+
+ RegisterSet TakeVGPRS, TakeSGPRS, TakeAGPRS;
+ unsigned TakeSGPRsNum = 0, TakeVGPRsNum = 0, TakeAGPRsNum = 0;
+
+ splitByRegPressureSet(Take, TakeSGPRS, TakeSGPRsNum, TakeVGPRS, TakeVGPRsNum,
+ TakeAGPRS, TakeAGPRsNum);
+
+ RegisterSet CandVGPRS, CandSGPRS, CandAGPRS;
+ unsigned CandSGPRsNum = 0, CandVGPRsNum = 0,
+ CandAGPRsNum = 0;
+
+ splitByRegPressureSet(Cand, CandSGPRS, CandSGPRsNum,
+ CandVGPRS, CandVGPRsNum, CandAGPRS,
+ CandAGPRsNum);
+
+ if (TakeSGPRsNum >= NumAvailableSGPRs) {
+ NU.getSortedForInstruction(*MBB.instr_begin(), TakeSGPRS);
+ Entry.ActiveSet.insert(TakeSGPRS.begin(),
+ TakeSGPRS.begin() + NumAvailableSGPRs);
+ } else {
+ unsigned FreeSpace = NumAvailableSGPRs - TakeSGPRsNum;
+
+ Entry.ActiveSet.insert(TakeSGPRS.begin(), TakeSGPRS.end());
+ NU.getSortedForInstruction(*MBB.instr_begin(), CandSGPRS);
+ Entry.ActiveSet.insert(CandSGPRS.begin(),
+ CandSGPRS.begin() + FreeSpace);
+ }
+
+ formActiveSet(MBB, Take, Cand, L);
+}
+
+void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, Register VReg) {}
+
+void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, Register VReg) {}
+
+void AMDGPUSSASpiller::reloadBefore(Register,
+ MachineBasicBlock::iterator InsertBefore) {}
+
+void AMDGPUSSASpiller::spillBefore(Register,
+ MachineBasicBlock::iterator InsertBefore) {}
+
+SmallVector<unsigned> AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
+ return SmallVector<unsigned>();
+}
+
+void AMDGPUSSASpiller::limit(RegisterSet &Active, RegisterSet &Spilled,
+ MachineBasicBlock::iterator I, const RegisterSet Defs) {
+ MachineBasicBlock::iterator LimitPoint = I;
+ RegisterSet VGPRS, SGPRS, AGPRS;
+ unsigned CurSGPRsNum = 0, CurVGPRsNum = 0, CurAGPRsNum = 0;
+ unsigned NumSGPRDefs = 0, NumVGPRDefs = 0, NumAGPRDefs = 0;
+
+ splitByRegPressureSet(Active, SGPRS, CurSGPRsNum, VGPRS, CurVGPRsNum, AGPRS,
+ CurAGPRsNum);
+ if (!Defs.empty()) {
+ RegisterSet VGPRS, SGPRS, AGPRS;
+ splitByRegPressureSet(Defs, SGPRS, NumSGPRDefs, VGPRS, NumVGPRDefs, AGPRS,
+ NumAGPRDefs);
+ LimitPoint++;
+ }
+
+ if (CurSGPRsNum > NumAvailableSGPRs - NumSGPRDefs)
+ limit(Active, Spilled, LimitPoint, I, SGPRS,
+ CurSGPRsNum - NumAvailableSGPRs + NumSGPRDefs);
+
+ if (CurVGPRsNum > NumAvailableVGPRs - NumVGPRDefs)
+ limit(Active, Spilled, LimitPoint, I, VGPRS,
+ CurVGPRsNum - NumAvailableVGPRs + NumVGPRDefs);
+
+ if (CurAGPRsNum > NumAvailableAGPRs - NumAGPRDefs)
+ limit(Active, Spilled, LimitPoint, I, AGPRS,
+ CurAGPRsNum - NumAvailableAGPRs + NumAGPRDefs);
+}
+
+void AMDGPUSSASpiller::limit(RegisterSet &Active, RegisterSet &Spilled,
+ MachineBasicBlock::iterator LimitPoint,
+ MachineBasicBlock::iterator InsertionPoint,
+ RegisterSet RegClassSubset, unsigned Limit) {
+ NU.getSortedForInstruction(*LimitPoint, RegClassSubset);
+ RegisterSet Tmp(RegClassSubset.end() - Limit, RegClassSubset.end());
+ Active.set_subtract(Tmp);
+ Tmp.set_subtract(Spilled);
+ for (auto R : Tmp) {
+ if (!NU.isDead(*InsertionPoint, R))
+ spillBefore(R, InsertionPoint);
+ }
+}
+
+void AMDGPUSSASpiller::splitByRegPressureSet(
+ const RegisterSet In, RegisterSet &SGPRS, unsigned &SGPRRP,
+ RegisterSet &VGPRS, unsigned &VGPRRP, RegisterSet &AGPRS,
+ unsigned &AGPRRP) {
+ for (auto VReg : In) {
+ const TargetRegisterClass *RC = TRI->getRegClass(VReg);
+ unsigned Weight = TRI->getRegClassWeight(RC).RegWeight;
+ const int *RPS = TRI->getRegClassPressureSets(RC);
+ while (*RPS != -1) {
+ if (*RPS == AMDGPU::RegisterPressureSets::SReg_32) {
+ SGPRS.insert(VReg);
+ SGPRRP += Weight;
+ break;
+ }
+ if (*RPS == AMDGPU::RegisterPressureSets::VGPR_32) {
+ VGPRS.insert(VReg);
+ VGPRRP += Weight;
+ }
+ if (*RPS == AMDGPU::RegisterPressureSets::AGPR_32) {
+ AGPRS.insert(VReg);
+ AGPRRP += Weight;
+ }
+ }
+ }
+}
+
+void AMDGPUSSASpiller::formActiveSet(const MachineBasicBlock &MBB,
+ const RegisterSet Take,
+ const RegisterSet Cand, MachineLoop *L) {
+ auto &Entry = getBlockInfo(MBB);
+
+ RegisterSet TakeVGPRS, TakeSGPRS, TakeAGPRS;
+ unsigned TakeSGPRsNum = 0, TakeVGPRsNum = 0, TakeAGPRsNum = 0;
+
+ splitByRegPressureSet(Take, TakeSGPRS, TakeSGPRsNum, TakeVGPRS, TakeVGPRsNum,
+ TakeAGPRS, TakeAGPRsNum);
+
+ RegisterSet CandVGPRS, CandSGPRS, CandAGPRS;
+ unsigned CandSGPRsNum = 0, CandVGPRsNum = 0, CandAGPRsNum = 0;
+
+ splitByRegPressureSet(Cand, CandSGPRS, CandSGPRsNum, CandVGPRS, CandVGPRsNum,
+ CandAGPRS, CandAGPRsNum);
+
+ if (TakeSGPRsNum >= NumAvailableSGPRs) {
+ NU.getSortedForInstruction(*MBB.instr_begin(), TakeSGPRS);
+ Entry.ActiveSet.insert(TakeSGPRS.begin(),
+ TakeSGPRS.begin() + NumAvailableSGPRs);
+ } else {
+ Entry.ActiveSet.insert(TakeSGPRS.begin(), TakeSGPRS.end());
+ unsigned FreeSpace = 0;
+ if (L) {
+ unsigned LoopMaxSGPRRP =
+ getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::SReg_32];
+ FreeSpace = NumAvailableSGPRs - (LoopMaxSGPRRP - CandSGPRsNum);
+ } else {
+ FreeSpace = NumAvailableSGPRs - TakeSGPRsNum;
+ }
+ NU.getSortedForInstruction(*MBB.instr_begin(), CandSGPRS);
+ Entry.ActiveSet.insert(CandSGPRS.begin(), CandSGPRS.begin() + FreeSpace);
+ }
+
+ if (TakeVGPRsNum >= NumAvailableVGPRs) {
+ NU.getSortedForInstruction(*MBB.instr_begin(), TakeVGPRS);
+ Entry.ActiveSet.insert(TakeVGPRS.begin(),
+ TakeVGPRS.begin() + NumAvailableVGPRs);
+ } else {
+ Entry.ActiveSet.insert(TakeVGPRS.begin(), TakeVGPRS.end());
+ unsigned FreeSpace = 0;
+ if (L) {
+ unsigned LoopMaxVGPRRP =
+ getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::VGPR_32];
+ FreeSpace = NumAvailableVGPRs - (LoopMaxVGPRRP - CandVGPRsNum);
+ } else {
+ FreeSpace = NumAvailableVGPRs - TakeVGPRsNum;
+ }
+ NU.getSortedForInstruction(*MBB.instr_begin(), CandVGPRS);
+ Entry.ActiveSet.insert(CandVGPRS.begin(), CandVGPRS.begin() + FreeSpace);
+ }
+
+ if (TakeAGPRsNum >= NumAvailableAGPRs) {
+ NU.getSortedForInstruction(*MBB.instr_begin(), TakeAGPRS);
+ Entry.ActiveSet.insert(TakeAGPRS.begin(),
+ TakeAGPRS.begin() + NumAvailableAGPRs);
+ } else {
+ Entry.ActiveSet.insert(TakeAGPRS.begin(), TakeAGPRS.end());
+ unsigned FreeSpace = 0;
+ if (L) {
+ unsigned LoopMaxAGPRRP =
+ getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::AGPR_32];
+ FreeSpace = NumAvailableAGPRs - (LoopMaxAGPRRP - CandAGPRsNum);
+ } else {
+ FreeSpace = NumAvailableAGPRs - TakeAGPRsNum;
+ }
+ NU.getSortedForInstruction(*MBB.instr_begin(), CandAGPRS);
+ Entry.ActiveSet.insert(CandAGPRS.begin(), CandAGPRS.begin() + FreeSpace);
+ }
+}
+
+bool AMDGPUSSASpiller::run(MachineFunction &MF) {
+ init(MF);
+ processFunction(MF);
+ return false;
+}
+} // namespace
+
+PreservedAnalyses
+llvm::AMDGPUSSASpillerPass::run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM) {
+ LiveVariables &LV = MFAM.getResult<LiveVariablesAnalysis>(MF);
+ MachineLoopInfo &LI = MFAM.getResult<MachineLoopAnalysis>(MF);
+ MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
+ AMDGPUNextUseAnalysis::Result &NU = MFAM.getResult<AMDGPUNextUseAnalysis>(MF);
+ AMDGPUSSASpiller Impl(LV, LI, MDT, NU);
+ bool Changed = Impl.run(MF);
+ if (!Changed)
+ return PreservedAnalyses::all();
+
+ // TODO: We could detect CFG changed.
+ auto PA = getMachineFunctionPassPreservedAnalyses();
+ return PA;
+}
+
+class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
+public:
+ static char ID;
+
+ AMDGPUSSASpillerLegacy() : MachineFunctionPass(ID) {
+ initializeAMDGPUSSASpillerLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override { return "AMDGPU SSA Spiller"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<LiveVariablesWrapperPass>();
+ AU.addRequired<MachineDominatorTreeWrapperPass>();
+ AU.addRequired<MachineLoopInfoWrapperPass>();
+ AU.addRequired<AMDGPUNextUseAnalysisWrapper>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+bool AMDGPUSSASpillerLegacy::runOnMachineFunction(MachineFunction &MF) {
+ LiveVariables &LV = getAnalysis<LiveVariablesWrapperPass>().getLV();
+ MachineLoopInfo &LI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
+ MachineDominatorTree &MDT =
+ getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
+ AMDGPUNextUseAnalysis::Result &NU =
+ getAnalysis<AMDGPUNextUseAnalysisWrapper>().getNU();
+ AMDGPUSSASpiller Impl(LV, LI, MDT, NU);
+ return Impl.run(MF);
+}
+
+INITIALIZE_PASS_BEGIN(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariablesWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AMDGPUNextUseAnalysisWrapper)
+INITIALIZE_PASS_END(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
+ false, false)
+
+char AMDGPUSSASpillerLegacy::ID = 0;
+
+char &llvm::AMDGPUSSASpillerLegacyID = AMDGPUSSASpillerLegacy::ID;
+
+FunctionPass *llvm::createAMDGPUSSASpillerLegacyPass() {
+ return new AMDGPUSSASpillerLegacy();
+}
+
+llvm::PassPluginLibraryInfo getMyNewMachineFunctionPassPluginInfo() {
+ return {LLVM_PLUGIN_API_VERSION, "AMDGPUSSASpiller",
+ LLVM_VERSION_STRING, [](PassBuilder &PB) {
+ PB.registerPipelineParsingCallback(
+ [](StringRef Name, MachineFunctionPassManager &MFPM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+ if (Name == "amdgpu-ssa-spiller") {
+ MFPM.addPass(AMDGPUSSASpillerPass());
+ return true;
+ }
+ return false;
+ });
+ }};
+}
+
+// Expose the pass to LLVM’s pass manager infrastructure
+extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo
+llvmGetPassPluginInfo() {
+ return getMyNewMachineFunctionPassPluginInfo();
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.h b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.h
new file mode 100644
index 0000000000000..e3468604f5a42
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.h
@@ -0,0 +1,25 @@
+//===- AMDGPUSSASpiller.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_SSASPILLER_H
+#define LLVM_LIB_TARGET_AMDGPU_SSASPILLER_H
+
+#include "llvm/CodeGen/MachinePassManager.h"
+
+namespace llvm {
+
+class AMDGPUSSASpillerPass : public PassInfoMixin<AMDGPUSSASpillerPass> {
+public:
+ AMDGPUSSASpillerPass() = default;
+ PreservedAnalyses run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM);
+};
+
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_AMDGPU_SSASPILLER_H
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 4a2f0a13b1325..2e27343e032e3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -544,6 +544,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeAMDGPUPrepareAGPRAllocLegacyPass(*PR);
initializeGCNDPPCombineLegacyPass(*PR);
initializeSILowerI1CopiesLegacyPass(*PR);
+ initializeAMDGPUSSASpillerLegacyPass(*PR);
initializeAMDGPUGlobalISelDivergenceLoweringPass(*PR);
initializeAMDGPURegBankSelectPass(*PR);
initializeAMDGPURegBankLegalizePass(*PR);
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 619ff4e5c73c4..4bb3112d3add9 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -109,6 +109,8 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUSelectionDAGInfo.cpp
AMDGPUSetWavePriority.cpp
AMDGPUSplitModule.cpp
+ AMDGPUSSASpiller.cpp
+ AMDGPUNextUseAnalysis.cpp
AMDGPUSubtarget.cpp
AMDGPUTargetMachine.cpp
AMDGPUTargetObjectFile.cpp
From 33a0e1ee396e3fc74b50fc7b3b2a93af776281cb Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Mon, 9 Dec 2024 19:04:54 +0100
Subject: [PATCH 02/46] SSA Spiller WIP 09.12.24
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 84 +++-
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 18 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 419 +++++++++---------
3 files changed, 285 insertions(+), 236 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 25dcd52a413e4..d66c9a9a35818 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -1,4 +1,5 @@
+
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/iterator_range.h"
@@ -78,7 +79,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
if (MO.isReg() && MO.getReg().isVirtual() && MO.isUse()) {
Register VReg = MO.getReg();
MachineInstr *Def = MRI->getVRegDef(VReg);
- if (Def->getParent() == MBB)
+ if (Def && Def->getParent() == MBB)
// defined in block - skip it
continue;
unsigned Distance =
@@ -99,32 +100,75 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
}
-unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI, const Register Vreg) {
- unsigned Dist = Infinity;
- const MachineBasicBlock *MBB = MI.getParent();
- SlotIndex Begin = Indexes->getMBBStartIdx(MBB->getNumber());
+unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI,
+ const Register VReg) {
SlotIndex Idx = Indexes->getInstructionIndex(MI);
- int IDist = Begin.distance(Idx)/SlotIndex::InstrDist;
- if (auto VMapRef = getVRegMap(MBB)) {
+ assert(Idx.isValid() && "Invalid Instruction index!");
+ if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
+ return InstrCache[&Idx][VReg];
+ }
+ return computeNextUseDistance(*MI.getParent(), Idx, VReg);
+}
+
+unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
+ Register VReg) {
+ SlotIndex Idx = Indexes->getMBBEndIdx(&MBB);
+ assert(Idx.isValid() && "Invalid Instruction index!");
+ if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
+ return InstrCache[&Idx][VReg];
+ }
+ return computeNextUseDistance(MBB, Idx, VReg);
+}
+
+unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
+ const SlotIndex I,
+ Register VReg) {
+ unsigned Dist = Infinity;
+
+ SlotIndex Begin = Indexes->getMBBStartIdx(MBB.getNumber());
+
+ int IDist = Begin.distance(I)/SlotIndex::InstrDist;
+ if (auto VMapRef = getVRegMap(&MBB)) {
VRegDistances &VRegs = VMapRef.value();
- if (VRegs.contains(Vreg)) {
- int UseDist = VRegs[Vreg];
+ if (VRegs.contains(VReg)) {
+ int UseDist = VRegs[VReg];
if ((UseDist - IDist) < 0) {
- for (auto Succ : successors(MBB)) {
+ for (auto Succ : successors(&MBB)) {
if (auto SuccVMapRef = getVRegMap(Succ)) {
VRegDistances &SuccVRegs = SuccVMapRef.value();
- if (SuccVRegs.contains(Vreg)) {
- Dist = std::min(Dist, SuccVRegs[Vreg]);
+ if (SuccVRegs.contains(VReg)) {
+ Dist = std::min(Dist, SuccVRegs[VReg]);
}
}
}
} else {
Dist = UseDist - IDist;
}
- return Dist;
+ } else {
+ // We hit a case when the VReg is defined and used inside the block.
+ // Let's see if the I is in between.
+ MachineInstr *Def = MRI->getVRegDef(VReg);
+ assert(Def && "Neither use distance nor Def found for reg!");
+ SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
+ assert(DefIdx.isValid() && "Register Def not in the Index");
+ if (SlotIndex::isEarlierInstr(DefIdx, I)) {
+ // "I" is after the Def
+ for (auto &U : MRI->use_instructions(VReg)) {
+ assert(U.getParent() == &MBB &&
+ "Use out of the block fount but distance was not recorded");
+ SlotIndex UIdx = Indexes->getInstructionIndex(U);
+ if (SlotIndex::isEarlierInstr(I, UIdx)) {
+ unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
+ if (UDist < Dist)
+ Dist = UDist;
+ }
+ }
+ }
}
+ if (Dist != Infinity)
+ InstrCache[&I][VReg] = Dist;
}
- return Infinity;
+ return Dist;
}
AMDGPUNextUseAnalysis::Result
@@ -154,7 +198,6 @@ char AMDGPUNextUseAnalysisWrapper::ID = 0;
char &llvm::AMDGPUNextUseAnalysisID = AMDGPUNextUseAnalysisWrapper::ID;
INITIALIZE_PASS_BEGIN(AMDGPUNextUseAnalysisWrapper, "amdgpu-next-use",
"AMDGPU Next Use Analysis", false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexesWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUNextUseAnalysisWrapper, "amdgpu-next-use",
@@ -165,6 +208,7 @@ bool AMDGPUNextUseAnalysisWrapper::runOnMachineFunction(
NU.Indexes = &getAnalysis<SlotIndexesWrapperPass>().getSI();
NU.LI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
NU.MRI = &MF.getRegInfo();
+ assert(NU.MRI->isSSA());
NU.init(MF);
NU.analyze(MF);
LLVM_DEBUG(NU.dump());
@@ -173,13 +217,9 @@ bool AMDGPUNextUseAnalysisWrapper::runOnMachineFunction(
void AMDGPUNextUseAnalysisWrapper::getAnalysisUsage(
AnalysisUsage &AU) const {
- AU.setPreservesCFG();
- AU.addRequiredTransitiveID(MachineLoopInfoID);
- AU.addPreservedID(MachineLoopInfoID);
- AU.addRequiredTransitiveID(MachineDominatorsID);
- AU.addPreservedID(MachineDominatorsID);
- AU.addPreserved<SlotIndexesWrapperPass>();
- AU.addRequiredTransitive<SlotIndexesWrapperPass>();
+ AU.setPreservesAll();
+ AU.addRequired<MachineLoopInfoWrapperPass>();
+ AU.addRequired<SlotIndexesWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 61df809c1e3b8..39ec6ccc895f1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -25,7 +25,7 @@ class NextUseResult {
SlotIndexes *Indexes;
const MachineRegisterInfo *MRI;
MachineLoopInfo *LI;
-
+ DenseMap<const SlotIndex *, DenseMap<Register, unsigned>> InstrCache;
public:
using VRegDistances = DenseMap<Register, unsigned>;
@@ -104,6 +104,9 @@ class NextUseResult {
VRegs[VReg] = Distance;
}
+ unsigned computeNextUseDistance(const MachineBasicBlock &MBB,
+ const SlotIndex I, Register Vreg);
+
void clear() {
NextUseMap.clear();
EdgeWeigths.clear();
@@ -120,12 +123,19 @@ class NextUseResult {
void print(raw_ostream &O) const { dump(O); }
- unsigned getNextUseDistance(const MachineInstr &MI, Register Vreg);
+ unsigned getNextUseDistance(const MachineInstr &MI, Register VReg);
+ unsigned getNextUseDistance(const MachineBasicBlock &MBB, Register VReg);
+
+ bool isDead(MachineBasicBlock &MBB, Register R) {
+ if (!R.isVirtual())
+ report_fatal_error("Only virtual registers allowed!\n", true);
+ return getNextUseDistance(MBB, R) == Infinity;
+ }
- bool isDead(MachineBasicBlock::iterator Pos, Register R) {
+ bool isDead(MachineInstr &MI, Register R) {
if (!R.isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
- return getNextUseDistance(*Pos, R) == Infinity;
+ return getNextUseDistance(MI, R) == Infinity;
}
void getSortedForInstruction(const MachineInstr &MI,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 9f87d0f5df202..bbdb82b5cda55 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -3,6 +3,7 @@
#include "llvm/ADT/SetOperations.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
@@ -11,6 +12,7 @@
#include "llvm/Target/TargetMachine.h"
#include "AMDGPUNextUseAnalysis.h"
+#include "GCNRegPressure.h"
using namespace llvm;
@@ -18,13 +20,24 @@ using namespace llvm;
namespace {
+ static void dumpRegSet(SetVector<Register> VRegs) {
+ dbgs() << "\n";
+ for (auto R : VRegs) {
+ dbgs() << printReg(R) << " ";
+ }
+ dbgs() << "\n";
+ }
+
class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
- LiveVariables &LV;
+ const LiveIntervals &LIS;
MachineLoopInfo &LI;
MachineDominatorTree &MDT;
AMDGPUNextUseAnalysis::Result &NU;
const MachineRegisterInfo *MRI;
const SIRegisterInfo *TRI;
+ const SIInstrInfo *TII;
+ const GCNSubtarget *ST;
+ MachineFrameInfo *MFI;
using RegisterSet = SetVector<Register>;
@@ -34,23 +47,39 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
RegisterSet SpillSet;
};
- unsigned NumAvailableSGPRs;
- unsigned NumAvailableVGPRs;
- unsigned NumAvailableAGPRs;
+ bool IsVGPRsPass;
+ unsigned NumAvailableRegs;
DenseMap<unsigned, SpillInfo> RegisterMap;
DenseMap<unsigned, unsigned> PostponedLoopLatches;
DenseMap<unsigned, SmallVector<unsigned>> LoopHeader2Latches;
- void init(const MachineFunction &MF) {
- const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
- MRI = &MF.getRegInfo();
- TRI = ST.getRegisterInfo();
- NumAvailableVGPRs = ST.getTotalNumVGPRs();
- NumAvailableSGPRs = ST.getTotalNumSGPRs();
-
- // FIXME: what is real num AGPRs available?
+ void dump() {
+ for (auto SI : RegisterMap) {
+ dbgs() << "\nMBB: " << SI.first;
+ dbgs() << "\n\tW: ";
+ for (auto R : SI.second.ActiveSet) {
+ dbgs() << printReg(R) << " ";
+ }
+ dbgs() << "\n\tS: ";
+ for (auto R : SI.second.SpillSet) {
+ dbgs() << printReg(R) << " ";
+ }
+ dbgs() << "\n";
+ }
+ }
- NumAvailableAGPRs = NumAvailableVGPRs;
+ void init(MachineFunction &MF, bool IsVGPRs) {
+ IsVGPRsPass = IsVGPRs;
+ ST = &MF.getSubtarget<GCNSubtarget>();
+ MRI = &MF.getRegInfo();
+ MFI = &MF.getFrameInfo();
+ TRI = ST->getRegisterInfo();
+ TII = ST->getInstrInfo();
+ NumAvailableRegs = IsVGPRsPass
+ ? TRI->getRegPressureSetLimit(
+ MF, AMDGPU::RegisterPressureSets::VGPR_32)
+ : TRI->getRegPressureSetLimit(
+ MF, AMDGPU::RegisterPressureSets::SReg_32);
}
SpillInfo &getBlockInfo(const MachineBasicBlock &MBB);
@@ -64,30 +93,30 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void reloadAtEnd(MachineBasicBlock &MBB, Register VReg);
void spillAtEnd(MachineBasicBlock &MBB, Register VReg);
- void reloadBefore(Register, MachineBasicBlock::iterator InsertBefore);
- void spillBefore(Register, MachineBasicBlock::iterator InsertBefore);
+ void reloadBefore(Register VReg, MachineBasicBlock::iterator InsertBefore);
+ void spillBefore(Register VReg, MachineBasicBlock::iterator InsertBefore);
- SmallVector<unsigned> getLoopMaxRP(MachineLoop *L);
+ unsigned getLoopMaxRP(MachineLoop *L);
void limit(RegisterSet &Active, RegisterSet &Spilled,
MachineBasicBlock::iterator I,
const RegisterSet Defs = RegisterSet());
- void limit(RegisterSet &Active, RegisterSet &Spilled,
- MachineBasicBlock::iterator LimitPoint,
- MachineBasicBlock::iterator InsertionPoint,
- RegisterSet RegClassSubset, unsigned Limit);
- void splitByRegPressureSet(const RegisterSet In, RegisterSet &SGPRS,
- unsigned &SGPRRP, RegisterSet &VGPRS,
- unsigned &VGPRRP, RegisterSet &AGPRS,
- unsigned &AGPRRP);
- void formActiveSet(const MachineBasicBlock &MBB, const RegisterSet Take,
- const RegisterSet Cand, MachineLoop *L = nullptr);
+
+ unsigned getSizeInRegs(const Register VReg);
+ unsigned getSizeInRegs(const RegisterSet VRegs);
+ bool takeReg(Register R) {
+ return ((IsVGPRsPass && TRI->isVGPR(*MRI, R)) ||
+ (!IsVGPRsPass && TRI->isSGPRReg(*MRI, R)));
+ }
+
+ unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
+ unsigned Capacity = 0);
public:
AMDGPUSSASpiller() = default;
- AMDGPUSSASpiller(LiveVariables &LV, MachineLoopInfo &LI,
+ AMDGPUSSASpiller(const LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
- : LV(LV), LI(LI), MDT(MDT), NU(NU) {}
+ : LIS(LIS), LI(LI), MDT(MDT), NU(NU) {}
bool run(MachineFunction &MF);
};
@@ -108,6 +137,7 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
}
connectToPredecessors(*MBB);
processBlock(*MBB);
+ dump();
// We process loop blocks twice: once with Spill/Active sets of
// loop latch blocks unknown, and then again as soon as the latch blocks
// sets are computed.
@@ -124,32 +154,38 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
}
void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
- auto &Entry = getBlockInfo(MBB);
+ auto &Entry = RegisterMap[MBB.getNumber()];
RegisterSet &Active = Entry.ActiveSet;
RegisterSet &Spilled = Entry.SpillSet;
RegisterSet Reloads;
- for (auto &I : MBB) {
- for (auto U : I.uses()) {
+ for (MachineBasicBlock::iterator I : MBB) {
+ for (auto U : I->uses()) {
if (!U.isReg())
continue;
if (U.getReg().isPhysical())
continue;
Register VReg = U.getReg();
+ if (!takeReg(VReg))
+ continue;
if (Active.insert(VReg)) {
// Not in reg, hence, should have been spilled before
- // TODO: This is ODD as the Spilled set is a union among all
+ // FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
- // Spilled.insert(U.getReg());
+ Spilled.insert(U.getReg());
Reloads.insert(VReg);
}
}
RegisterSet Defs;
- for (auto D : I.defs()) {
- if (D.getReg().isVirtual())
+ for (auto D : I->defs()) {
+ if (D.getReg().isVirtual() && takeReg(D.getReg()))
Defs.insert(D.getReg());
}
+
+ if (Reloads.empty() && Defs.empty())
+ continue;
+
limit(Active, Spilled, I);
- limit(Active, Spilled, std::next(&I), Defs);
+ limit(Active, Spilled, std::next(I), Defs);
// FIXME: limit with Defs is assumed to create room for the registers being
// defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
Active.insert(Defs.begin(), Defs.end());
@@ -157,6 +193,13 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (auto R : Reloads)
reloadBefore(R, I);
}
+ // Now, clear dead registers.
+ RegisterSet Deads;
+ for (auto R : Active) {
+ if (NU.isDead(MBB, R))
+ Deads.insert(R);
+ }
+ Active.set_subtract(Deads);
}
void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
@@ -168,7 +211,10 @@ void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
bool IgnoreLoops) {
+ if (predecessors(&MBB).empty())
+ return;
+ auto &Entry = RegisterMap[MBB.getNumber()];
SmallVector<MachineBasicBlock *> Preds(predecessors(&MBB));
// in RPOT loop latches have not been processed yet
@@ -187,19 +233,32 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
}
- SpillInfo &Cur = getBlockInfo(MBB);
for (auto Pred : Preds) {
- Cur.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
+ dumpRegSet(getBlockInfo(*Pred).SpillSet);
+ Entry.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
+ dumpRegSet(Entry.SpillSet);
}
- set_intersect(Cur.SpillSet, Cur.ActiveSet);
+ set_intersect(Entry.SpillSet, Entry.ActiveSet);
for (auto Pred : Preds) {
- for (auto R : set_difference(Cur.ActiveSet, getBlockInfo(*Pred).ActiveSet))
+ auto PE = getBlockInfo(*Pred);
+ RegisterSet ReloadInPred = set_difference(Entry.ActiveSet, PE.ActiveSet);
+ // We're about to insert N reloads at the end of the predecessor block.
+ // Make sure we have enough registers for N definitions or spill to make
+ // room for them.
+ limit(PE.ActiveSet, PE.SpillSet, Pred->end(), ReloadInPred);
+ for (auto R : ReloadInPred) {
reloadAtEnd(*Pred, R);
+ // FIXME: Do we need to update sets?
+ PE.ActiveSet.insert(R);
+ }
- for (auto S : set_intersection(
- set_difference(Cur.SpillSet, getBlockInfo(*Pred).SpillSet),
- getBlockInfo(*Pred).ActiveSet))
+ for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
+ PE.ActiveSet)) {
spillAtEnd(*Pred, S);
+ // FIXME: Do we need to update sets?
+ PE.SpillSet.insert(S);
+ Entry.SpillSet.insert(S);
+ }
}
}
@@ -213,28 +272,38 @@ void AMDGPUSSASpiller::initActiveSetUsualBlock(MachineBasicBlock &MBB) {
RegisterSet Take = getBlockInfo(**Pred).ActiveSet;
RegisterSet Cand = getBlockInfo(**Pred).ActiveSet;
- for (std::next(Pred); Pred != MBB.pred_end(); ++Pred) {
+ for (Pred = std::next(Pred); Pred != MBB.pred_end(); ++Pred) {
set_intersect(Take, getBlockInfo(**Pred).ActiveSet);
Cand.set_union(getBlockInfo(**Pred).ActiveSet);
}
Cand.set_subtract(Take);
- formActiveSet(MBB, Take, Cand);
+ if (Take.empty() && Cand.empty())
+ return;
+
+ unsigned TakeSize = fillActiveSet(MBB, Take);
+ if (TakeSize < NumAvailableRegs) {
+ unsigned FullSize = fillActiveSet(MBB, Cand);
+ assert(FullSize <= NumAvailableRegs);
+ }
}
void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
- auto &Entry = getBlockInfo(MBB);
+ // auto &Entry = RegisterMap[MBB.getNumber()];
RegisterSet LiveIn;
for (unsigned i = 0; i < MRI->getNumVirtRegs(); i++) {
Register VReg = Register::index2VirtReg(i);
- if (LV.isLiveIn(VReg, MBB))
+ if (!LIS.hasInterval(VReg))
+ continue;
+ if (takeReg(VReg) && LIS.isLiveInToMBB(LIS.getInterval(VReg), &MBB)) {
LiveIn.insert(VReg);
+ }
}
for (auto &PHI : MBB.phis()) {
for (auto U : PHI.uses()) {
- if (U.isReg()) {
+ if (U.isReg() && takeReg(U.getReg())) {
// assume PHIs operands are always virtual regs
LiveIn.insert(U.getReg());
}
@@ -252,193 +321,121 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
RegisterSet Take = set_intersection(LiveIn, UsedInLoop);
RegisterSet Cand = set_difference(LiveIn, UsedInLoop);
- RegisterSet TakeVGPRS, TakeSGPRS, TakeAGPRS;
- unsigned TakeSGPRsNum = 0, TakeVGPRsNum = 0, TakeAGPRsNum = 0;
- splitByRegPressureSet(Take, TakeSGPRS, TakeSGPRsNum, TakeVGPRS, TakeVGPRsNum,
- TakeAGPRS, TakeAGPRsNum);
-
- RegisterSet CandVGPRS, CandSGPRS, CandAGPRS;
- unsigned CandSGPRsNum = 0, CandVGPRsNum = 0,
- CandAGPRsNum = 0;
-
- splitByRegPressureSet(Cand, CandSGPRS, CandSGPRsNum,
- CandVGPRS, CandVGPRsNum, CandAGPRS,
- CandAGPRsNum);
-
- if (TakeSGPRsNum >= NumAvailableSGPRs) {
- NU.getSortedForInstruction(*MBB.instr_begin(), TakeSGPRS);
- Entry.ActiveSet.insert(TakeSGPRS.begin(),
- TakeSGPRS.begin() + NumAvailableSGPRs);
- } else {
- unsigned FreeSpace = NumAvailableSGPRs - TakeSGPRsNum;
-
- Entry.ActiveSet.insert(TakeSGPRS.begin(), TakeSGPRS.end());
- NU.getSortedForInstruction(*MBB.instr_begin(), CandSGPRS);
- Entry.ActiveSet.insert(CandSGPRS.begin(),
- CandSGPRS.begin() + FreeSpace);
+ unsigned TakeSize = fillActiveSet(MBB, Take);
+ if (TakeSize < NumAvailableRegs) {
+ // At this point we have to decide not for the current block only but for
+ // the whole loop. We use the following heuristic: given that the Cand
+ // register set consists of those registers which are live-through the
+ // loop, let's consider LoopMaxRP - CandSize to be the RP caused by those,
+ // used inside the loop. According to this, we can keep NumAvailableRegs -
+ // (LoopMaxRP - Cand.size()) in the loop header active set.
+ unsigned LoopMaxRP = getLoopMaxRP(L);
+ unsigned FreeSpace = NumAvailableRegs - (LoopMaxRP - Cand.size());
+ unsigned FullSize = fillActiveSet(MBB, Cand, FreeSpace);
+ assert(FullSize <= NumAvailableRegs);
}
-
- formActiveSet(MBB, Take, Cand, L);
}
-void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, Register VReg) {}
+void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, Register VReg) {
+ reloadBefore(VReg, MBB.getFirstInstrTerminator());
+}
-void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, Register VReg) {}
+void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, Register VReg) {
+ spillBefore(VReg, MBB.getFirstTerminator());
+}
-void AMDGPUSSASpiller::reloadBefore(Register,
- MachineBasicBlock::iterator InsertBefore) {}
+void AMDGPUSSASpiller::reloadBefore(Register VReg,
+ MachineBasicBlock::iterator InsertBefore) {
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
+ TRI->getSpillAlign(*RC));
+ TII->loadRegFromStackSlot(*InsertBefore->getParent(), InsertBefore, VReg, FI,
+ RC, TRI, VReg);
+}
-void AMDGPUSSASpiller::spillBefore(Register,
- MachineBasicBlock::iterator InsertBefore) {}
+void AMDGPUSSASpiller::spillBefore(Register VReg,
+ MachineBasicBlock::iterator InsertBefore) {
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
+ TRI->getSpillAlign(*RC));
+ TII->storeRegToStackSlot(*InsertBefore->getParent(), InsertBefore, VReg, true, FI,
+ RC, TRI, VReg);
+}
-SmallVector<unsigned> AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
- return SmallVector<unsigned>();
+unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
+ unsigned MaxRP = 0;
+ for (auto MBB : L->getBlocks()) {
+ SlotIndex MBBEndSlot = LIS.getSlotIndexes()->getMBBEndIdx(MBB);
+ GCNUpwardRPTracker RPT(LIS);
+ RPT.reset(*MRI, MBBEndSlot);
+ for (auto &MI : reverse(*MBB))
+ RPT.recede(MI);
+ GCNRegPressure RP = RPT.getMaxPressure();
+ unsigned CurMaxRP =
+ IsVGPRsPass ? RP.getVGPRNum(ST->hasGFX90AInsts()) : RP.getSGPRNum();
+ if (CurMaxRP > MaxRP)
+ MaxRP = CurMaxRP;
+ }
+ return MaxRP;
}
void AMDGPUSSASpiller::limit(RegisterSet &Active, RegisterSet &Spilled,
MachineBasicBlock::iterator I, const RegisterSet Defs) {
MachineBasicBlock::iterator LimitPoint = I;
- RegisterSet VGPRS, SGPRS, AGPRS;
- unsigned CurSGPRsNum = 0, CurVGPRsNum = 0, CurAGPRsNum = 0;
- unsigned NumSGPRDefs = 0, NumVGPRDefs = 0, NumAGPRDefs = 0;
- splitByRegPressureSet(Active, SGPRS, CurSGPRsNum, VGPRS, CurVGPRsNum, AGPRS,
- CurAGPRsNum);
if (!Defs.empty()) {
- RegisterSet VGPRS, SGPRS, AGPRS;
- splitByRegPressureSet(Defs, SGPRS, NumSGPRDefs, VGPRS, NumVGPRDefs, AGPRS,
- NumAGPRDefs);
LimitPoint++;
}
- if (CurSGPRsNum > NumAvailableSGPRs - NumSGPRDefs)
- limit(Active, Spilled, LimitPoint, I, SGPRS,
- CurSGPRsNum - NumAvailableSGPRs + NumSGPRDefs);
-
- if (CurVGPRsNum > NumAvailableVGPRs - NumVGPRDefs)
- limit(Active, Spilled, LimitPoint, I, VGPRS,
- CurVGPRsNum - NumAvailableVGPRs + NumVGPRDefs);
+ unsigned CurRP = getSizeInRegs(Active);
+ if(CurRP < NumAvailableRegs - Defs.size())
+ return;
- if (CurAGPRsNum > NumAvailableAGPRs - NumAGPRDefs)
- limit(Active, Spilled, LimitPoint, I, AGPRS,
- CurAGPRsNum - NumAvailableAGPRs + NumAGPRDefs);
-}
+ unsigned Limit = CurRP - NumAvailableRegs + Defs.size();
-void AMDGPUSSASpiller::limit(RegisterSet &Active, RegisterSet &Spilled,
- MachineBasicBlock::iterator LimitPoint,
- MachineBasicBlock::iterator InsertionPoint,
- RegisterSet RegClassSubset, unsigned Limit) {
- NU.getSortedForInstruction(*LimitPoint, RegClassSubset);
- RegisterSet Tmp(RegClassSubset.end() - Limit, RegClassSubset.end());
+ NU.getSortedForInstruction(*LimitPoint, Active);
+ RegisterSet Tmp(Active.end() - Limit, Active.end());
Active.set_subtract(Tmp);
Tmp.set_subtract(Spilled);
for (auto R : Tmp) {
- if (!NU.isDead(*InsertionPoint, R))
- spillBefore(R, InsertionPoint);
- }
-}
-
-void AMDGPUSSASpiller::splitByRegPressureSet(
- const RegisterSet In, RegisterSet &SGPRS, unsigned &SGPRRP,
- RegisterSet &VGPRS, unsigned &VGPRRP, RegisterSet &AGPRS,
- unsigned &AGPRRP) {
- for (auto VReg : In) {
- const TargetRegisterClass *RC = TRI->getRegClass(VReg);
- unsigned Weight = TRI->getRegClassWeight(RC).RegWeight;
- const int *RPS = TRI->getRegClassPressureSets(RC);
- while (*RPS != -1) {
- if (*RPS == AMDGPU::RegisterPressureSets::SReg_32) {
- SGPRS.insert(VReg);
- SGPRRP += Weight;
- break;
- }
- if (*RPS == AMDGPU::RegisterPressureSets::VGPR_32) {
- VGPRS.insert(VReg);
- VGPRRP += Weight;
- }
- if (*RPS == AMDGPU::RegisterPressureSets::AGPR_32) {
- AGPRS.insert(VReg);
- AGPRRP += Weight;
- }
+ if (!NU.isDead(*I, R)) {
+ spillBefore(R, I);
+ Spilled.insert(R);
}
}
}
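The rewritten limit() above follows the Belady-style policy: when the active set no longer fits, evict the registers whose next use is farthest away and spill those that are still live and not yet spilled. Below is a compressed sketch of that policy with illustrative names and a callback standing in for the next-use query; for brevity it counts registers rather than 32-bit register units, so it is not the patch interface.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"

// Sketch only: keep the Budget nearest-use registers active; the rest are
// returned to the caller as eviction (spill) candidates.
llvm::SmallVector<llvm::Register>
limitSketch(llvm::SetVector<llvm::Register> &Active,
            llvm::function_ref<unsigned(llvm::Register)> NextUseDist,
            unsigned Budget) {
  llvm::SmallVector<llvm::Register> Regs(Active.begin(), Active.end());
  llvm::sort(Regs, [&](llvm::Register L, llvm::Register R) {
    return NextUseDist(L) < NextUseDist(R); // nearest next use first
  });
  llvm::SmallVector<llvm::Register> Evicted;
  Active.clear();
  for (llvm::Register R : Regs) {
    if (Active.size() < Budget)
      Active.insert(R); // keep the registers used soonest
    else
      Evicted.push_back(R); // candidates to spill before the limit point
  }
  return Evicted;
}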
-void AMDGPUSSASpiller::formActiveSet(const MachineBasicBlock &MBB,
- const RegisterSet Take,
- const RegisterSet Cand, MachineLoop *L) {
- auto &Entry = getBlockInfo(MBB);
-
- RegisterSet TakeVGPRS, TakeSGPRS, TakeAGPRS;
- unsigned TakeSGPRsNum = 0, TakeVGPRsNum = 0, TakeAGPRsNum = 0;
-
- splitByRegPressureSet(Take, TakeSGPRS, TakeSGPRsNum, TakeVGPRS, TakeVGPRsNum,
- TakeAGPRS, TakeAGPRsNum);
-
- RegisterSet CandVGPRS, CandSGPRS, CandAGPRS;
- unsigned CandSGPRsNum = 0, CandVGPRsNum = 0, CandAGPRsNum = 0;
-
- splitByRegPressureSet(Cand, CandSGPRS, CandSGPRsNum, CandVGPRS, CandVGPRsNum,
- CandAGPRS, CandAGPRsNum);
-
- if (TakeSGPRsNum >= NumAvailableSGPRs) {
- NU.getSortedForInstruction(*MBB.instr_begin(), TakeSGPRS);
- Entry.ActiveSet.insert(TakeSGPRS.begin(),
- TakeSGPRS.begin() + NumAvailableSGPRs);
- } else {
- Entry.ActiveSet.insert(TakeSGPRS.begin(), TakeSGPRS.end());
- unsigned FreeSpace = 0;
- if (L) {
- unsigned LoopMaxSGPRRP =
- getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::SReg_32];
- FreeSpace = NumAvailableSGPRs - (LoopMaxSGPRRP - CandSGPRsNum);
- } else {
- FreeSpace = NumAvailableSGPRs - TakeSGPRsNum;
- }
- NU.getSortedForInstruction(*MBB.instr_begin(), CandSGPRS);
- Entry.ActiveSet.insert(CandSGPRS.begin(), CandSGPRS.begin() + FreeSpace);
- }
+unsigned AMDGPUSSASpiller::getSizeInRegs(const Register VReg) {
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ return TRI->getRegClassWeight(RC).RegWeight;
+}
- if (TakeVGPRsNum >= NumAvailableVGPRs) {
- NU.getSortedForInstruction(*MBB.instr_begin(), TakeVGPRS);
- Entry.ActiveSet.insert(TakeVGPRS.begin(),
- TakeVGPRS.begin() + NumAvailableVGPRs);
- } else {
- Entry.ActiveSet.insert(TakeVGPRS.begin(), TakeVGPRS.end());
- unsigned FreeSpace = 0;
- if (L) {
- unsigned LoopMaxVGPRRP =
- getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::VGPR_32];
- FreeSpace = NumAvailableVGPRs - (LoopMaxVGPRRP - CandVGPRsNum);
- } else {
- FreeSpace = NumAvailableVGPRs - TakeVGPRsNum;
- }
- NU.getSortedForInstruction(*MBB.instr_begin(), CandVGPRS);
- Entry.ActiveSet.insert(CandVGPRS.begin(), CandVGPRS.begin() + FreeSpace);
+unsigned AMDGPUSSASpiller::getSizeInRegs(const RegisterSet VRegs) {
+ unsigned Size = 0;
+ for (auto VReg : VRegs) {
+ Size += getSizeInRegs(VReg);
}
+ return Size;
+}
- if (TakeAGPRsNum >= NumAvailableAGPRs) {
- NU.getSortedForInstruction(*MBB.instr_begin(), TakeAGPRS);
- Entry.ActiveSet.insert(TakeAGPRS.begin(),
- TakeAGPRS.begin() + NumAvailableAGPRs);
- } else {
- Entry.ActiveSet.insert(TakeAGPRS.begin(), TakeAGPRS.end());
- unsigned FreeSpace = 0;
- if (L) {
- unsigned LoopMaxAGPRRP =
- getLoopMaxRP(L)[AMDGPU::RegisterPressureSets::AGPR_32];
- FreeSpace = NumAvailableAGPRs - (LoopMaxAGPRRP - CandAGPRsNum);
- } else {
- FreeSpace = NumAvailableAGPRs - TakeAGPRsNum;
- }
- NU.getSortedForInstruction(*MBB.instr_begin(), CandAGPRS);
- Entry.ActiveSet.insert(CandAGPRS.begin(), CandAGPRS.begin() + FreeSpace);
+unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
+ unsigned Capacity) {
+ unsigned Limit = Capacity ? Capacity : NumAvailableRegs;
+ auto &Active = RegisterMap[MBB.getNumber()].ActiveSet;
+ unsigned Size = getSizeInRegs(Active);
+ NU.getSortedForInstruction(*MBB.instr_begin(), S);
+ for (auto VReg : S) {
+ if (Size + getSizeInRegs(VReg) < Limit)
+ Active.insert(VReg);
}
+ return Size;
}
bool AMDGPUSSASpiller::run(MachineFunction &MF) {
- init(MF);
+ init(MF, false);
+ processFunction(MF);
+ init(MF, true);
processFunction(MF);
return false;
}
@@ -447,11 +444,11 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
PreservedAnalyses
llvm::AMDGPUSSASpillerPass::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
- LiveVariables &LV = MFAM.getResult<LiveVariablesAnalysis>(MF);
+ LiveIntervals &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
MachineLoopInfo &LI = MFAM.getResult<MachineLoopAnalysis>(MF);
MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
AMDGPUNextUseAnalysis::Result &NU = MFAM.getResult<AMDGPUNextUseAnalysis>(MF);
- AMDGPUSSASpiller Impl(LV, LI, MDT, NU);
+ AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
bool Changed = Impl.run(MF);
if (!Changed)
return PreservedAnalyses::all();
@@ -475,30 +472,32 @@ class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
- AU.addRequired<LiveVariablesWrapperPass>();
- AU.addRequired<MachineDominatorTreeWrapperPass>();
- AU.addRequired<MachineLoopInfoWrapperPass>();
+ AU.addRequiredTransitiveID(MachineLoopInfoID);
+ AU.addPreservedID(MachineLoopInfoID);
+ AU.addRequiredTransitiveID(MachineDominatorsID);
+ AU.addPreservedID(MachineDominatorsID);
+ AU.addRequired<LiveIntervalsWrapperPass>();
AU.addRequired<AMDGPUNextUseAnalysisWrapper>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
bool AMDGPUSSASpillerLegacy::runOnMachineFunction(MachineFunction &MF) {
- LiveVariables &LV = getAnalysis<LiveVariablesWrapperPass>().getLV();
+ const LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
MachineLoopInfo &LI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
MachineDominatorTree &MDT =
getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
AMDGPUNextUseAnalysis::Result &NU =
getAnalysis<AMDGPUNextUseAnalysisWrapper>().getNU();
- AMDGPUSSASpiller Impl(LV, LI, MDT, NU);
+ AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
return Impl.run(MF);
}
INITIALIZE_PASS_BEGIN(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
false, false)
-INITIALIZE_PASS_DEPENDENCY(LiveVariablesWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AMDGPUNextUseAnalysisWrapper)
INITIALIZE_PASS_END(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
false, false)
>From 97ee4d0e659884648426961dd68da9efe43a74b8 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Thu, 12 Dec 2024 23:36:49 +0100
Subject: [PATCH 03/46] SSA Spiller. First version that really works on
very simple code.
1. RP is computed in terms of the number of 32-bit register units (see the
sketch after this list).
2. limit() works not on the number of registers but on the number of
32-bit register units.
3. Next-use distance is computed for a point between a def and a use in a block.
4. If the first use cached in the VRegs map is before the MI, we scan
the block for further uses before checking the next use in predecessors.
5. A lot of minor bugs fixed.
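A minimal sketch of the measurement referenced in items 1 and 2, assuming (as the patch's getSizeInRegs does) that a register class's weight equals the number of 32-bit units a value of that class occupies; the helper name is illustrative only.

#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

// Sketch only: pressure contribution of a set of virtual registers,
// measured in 32-bit register units.
unsigned sizeInRegUnits(const llvm::SetVector<llvm::Register> &Regs,
                        const llvm::MachineRegisterInfo &MRI,
                        const llvm::TargetRegisterInfo &TRI) {
  unsigned Units = 0;
  for (llvm::Register R : Regs) {
    const llvm::TargetRegisterClass *RC = MRI.getRegClass(R);
    Units += TRI.getRegClassWeight(RC).RegWeight; // weight ~ #32-bit units
  }
  return Units;
}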
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 108 ++++++++++++++----
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 20 +++-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 96 ++++++++++------
3 files changed, 168 insertions(+), 56 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index d66c9a9a35818..a7c2c76b4d76d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -133,37 +133,104 @@ unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
if (VRegs.contains(VReg)) {
int UseDist = VRegs[VReg];
if ((UseDist - IDist) < 0) {
- for (auto Succ : successors(&MBB)) {
- if (auto SuccVMapRef = getVRegMap(Succ)) {
- VRegDistances &SuccVRegs = SuccVMapRef.value();
- if (SuccVRegs.contains(VReg)) {
- Dist = std::min(Dist, SuccVRegs[VReg]);
+
+ // FIXME: VRegs contains only upward-exposed info, i.e. the very first use
+ // in the block! (UseDist - IDist) < 0 just means that our MI is later than
+ // the first use of the VReg. The caller (the SSA Spiller) is interested in
+ // the next use in the block after the MI, so we need to scan the current
+ // block from the MI to the block end BEFORE checking the successors.
+
+ // NOTE: Make sure that we don't spoil the info for the Next Use analysis
+ // itself. If we do, we need two different functions for querying the
+ // next-use distance.
+ bool Done = false;
+ MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
+ if (Instr) {
+ // we canot use SlotIndexes to compare positions because
+ // spills/reloads were not added in Instruction Index. So, just scan
+ // the BB.
+ unsigned D = 0;
+ MachineBasicBlock::iterator It(Instr);
+ while (It != MBB.end()) {
+ if (It->definesRegister(VReg, TRI)) {
+ // VReg is DEAD
+ Dist = Infinity;
+ Done = true;
+ break;
}
+ if (It->readsRegister(VReg, TRI)) {
+ Dist = D;
+ Done = true;
+ break;
+ }
+ D++;
+ It++;
}
}
+ if (!Done)
+ // The instruction of interest is after the first use of the register
+ // in the block and the register has not been killed in the block.
+ // Look for the next use in successors.
+ for (auto Succ : successors(&MBB)) {
+ if (auto SuccVMapRef = getVRegMap(Succ)) {
+ VRegDistances &SuccVRegs = SuccVMapRef.value();
+ if (SuccVRegs.contains(VReg)) {
+ Dist = std::min(Dist, SuccVRegs[VReg]);
+ }
+ }
+ }
} else {
Dist = UseDist - IDist;
}
} else {
// We hit a case when the VReg is defined and used inside the block.
- // Let's see if the I is in between.
- MachineInstr *Def = MRI->getVRegDef(VReg);
- assert(Def && "Neither use distance no Def found for reg!");
- SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
- assert(DefIdx.isValid() && "Register Def not in the Index");
- if (SlotIndex::isEarlierInstr(DefIdx, I)) {
- // "I" is after the Def
- for (auto &U : MRI->use_instructions(VReg)) {
- assert(U.getParent() == &MBB &&
- "Use out of the block fount but distance was not recorded");
- SlotIndex UIdx = Indexes->getInstructionIndex(U);
- if (SlotIndex::isEarlierInstr(I, UIdx)) {
- unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
- if (UDist < Dist)
- Dist = UDist;
+ // Let's see if I is in between. Since we may be called on a function with
+ // broken SSA we cannot rely on MRI.getVRegDef. The VReg def in the block
+ // may be a reload, so we cannot use SlotIndexes to compare positions
+ // because spills/reloads were not added to the instruction index. So,
+ // just scan the BB.
+ MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
+ if (Instr) {
+ bool DefSeen = false, InstrSeen = false;
+ unsigned D = 0;
+ for (auto &MI : MBB) {
+ if (InstrSeen)
+ D++;
+ if (Instr == &MI) {
+ if (!DefSeen)
+ break;
+ InstrSeen = true;
+ }
+
+ if (MI.definesRegister(VReg, TRI))
+ DefSeen = true;
+ if (MI.readsRegister(VReg, TRI) && InstrSeen) {
+ Dist = D;
+ break;
}
}
}
+
+ // MachineInstr *Def = MRI->getVRegDef(VReg);
+ // assert(Def && "Neither use distance no Def found for reg!");
+ // SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
+ // assert(DefIdx.isValid() && "Register Def not in the Index");
+ // if (SlotIndex::isEarlierInstr(DefIdx, I)) {
+ // // "I" is after the Def
+ // for (auto &U : MRI->use_instructions(VReg)) {
+ // assert(U.getParent() == &MBB &&
+ // "Use out of the block fount but distance was not recorded");
+ // SlotIndex UIdx = Indexes->getInstructionIndex(U);
+ // if (SlotIndex::isEarlierInstr(I, UIdx)) {
+ // unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
+ // if (UDist < Dist)
+ // Dist = UDist;
+ // }
+ // }
+ // }
}
if (Dist != Infinity)
InstrCache[&I][VReg] = Dist;
@@ -208,6 +275,7 @@ bool AMDGPUNextUseAnalysisWrapper::runOnMachineFunction(
NU.Indexes = &getAnalysis<SlotIndexesWrapperPass>().getSI();
NU.LI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
NU.MRI = &MF.getRegInfo();
+ NU.TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
assert(NU.MRI->isSSA());
NU.init(MF);
NU.analyze(MF);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 39ec6ccc895f1..849ef3d2ad5b1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -14,6 +14,9 @@
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
+#include "SIRegisterInfo.h"
+#include "GCNSubtarget.h"
+
#include <limits>
using namespace llvm;
@@ -24,6 +27,7 @@ class NextUseResult {
friend class AMDGPUNextUseAnalysisWrapper;
SlotIndexes *Indexes;
const MachineRegisterInfo *MRI;
+ const SIRegisterInfo *TRI;
MachineLoopInfo *LI;
DenseMap<const SlotIndex *, DenseMap<Register, unsigned>> InstrCache;
public:
@@ -138,8 +142,18 @@ class NextUseResult {
return getNextUseDistance(MI, R) == Infinity;
}
- void getSortedForInstruction(const MachineInstr &MI,
+ void getSortedForBlockEnd(MachineBasicBlock &MBB,
SetVector<Register> &Regs) {
+ auto SortByDist = [&](const Register LHS, const Register RHS) {
+ return getNextUseDistance(MBB, LHS) < getNextUseDistance(MBB, RHS);
+ };
+ SmallVector<Register> Tmp(Regs.takeVector());
+ sort(Tmp, SortByDist);
+ Regs.insert(Tmp.begin(), Tmp.end());
+ }
+
+ void getSortedForInstruction(const MachineInstr &MI,
+ SetVector<Register> &Regs) {
auto SortByDist = [&](const Register LHS, const Register RHS) {
return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
};
@@ -148,8 +162,8 @@ class NextUseResult {
Regs.insert(Tmp.begin(), Tmp.end());
}
- std::vector<std::pair<Register, unsigned>> getSortedByDistance(
- const MachineInstr &MI, std::vector<Register> &W) {
+ std::vector<std::pair<Register, unsigned>>
+ getSortedByDistance(const MachineInstr &MI, std::vector<Register> &W) {
std::vector<std::pair<Register, unsigned>> Result;
auto compareByVal = [](std::pair<Register, unsigned> &LHS,
std::pair<Register, unsigned> &RHS) -> bool {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index bbdb82b5cda55..cf53422a7717e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -75,11 +75,12 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
MFI = &MF.getFrameInfo();
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
- NumAvailableRegs = IsVGPRsPass
- ? TRI->getRegPressureSetLimit(
- MF, AMDGPU::RegisterPressureSets::VGPR_32)
- : TRI->getRegPressureSetLimit(
- MF, AMDGPU::RegisterPressureSets::SReg_32);
+ NumAvailableRegs =
+ IsVGPRsPass ? ST->getMaxNumVGPRs(MF) : ST->getMaxNumSGPRs(MF);
+ // ? TRI->getRegPressureSetLimit(
+ // MF, AMDGPU::RegisterPressureSets::VGPR_32)
+ // : TRI->getRegPressureSetLimit(
+ // MF, AMDGPU::RegisterPressureSets::SReg_32);
}
SpillInfo &getBlockInfo(const MachineBasicBlock &MBB);
@@ -97,10 +98,10 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void spillBefore(Register VReg, MachineBasicBlock::iterator InsertBefore);
unsigned getLoopMaxRP(MachineLoop *L);
- void limit(RegisterSet &Active, RegisterSet &Spilled,
+ void limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
MachineBasicBlock::iterator I,
const RegisterSet Defs = RegisterSet());
-
+
unsigned getSizeInRegs(const Register VReg);
unsigned getSizeInRegs(const RegisterSet VRegs);
bool takeReg(Register R) {
@@ -157,8 +158,9 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
auto &Entry = RegisterMap[MBB.getNumber()];
RegisterSet &Active = Entry.ActiveSet;
RegisterSet &Spilled = Entry.SpillSet;
- RegisterSet Reloads;
+
for (MachineBasicBlock::iterator I : MBB) {
+ RegisterSet Reloads;
for (auto U : I->uses()) {
if (!U.isReg())
continue;
@@ -167,12 +169,18 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Register VReg = U.getReg();
if (!takeReg(VReg))
continue;
+ // if (U.getSubReg()) {
+ // dbgs() << U << "\n";
+ // }
if (Active.insert(VReg)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
- Spilled.insert(U.getReg());
- Reloads.insert(VReg);
+ // SPECIAL CASE: undef
+ if (!U.isUndef()) {
+ Spilled.insert(VReg);
+ Reloads.insert(VReg);
+ }
}
}
RegisterSet Defs;
@@ -184,8 +192,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (Reloads.empty() && Defs.empty())
continue;
- limit(Active, Spilled, I);
- limit(Active, Spilled, std::next(I), Defs);
+ limit(MBB, Active, Spilled, I);
+ limit(MBB, Active, Spilled, I, Defs);
// FIXME: limit with Defs is assumed to create room for the registers being
// defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
Active.insert(Defs.begin(), Defs.end());
@@ -242,14 +250,16 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
for (auto Pred : Preds) {
auto PE = getBlockInfo(*Pred);
RegisterSet ReloadInPred = set_difference(Entry.ActiveSet, PE.ActiveSet);
- // We're about to insert N reloads at the end of the predecessor block.
- // Make sure we have enough registers for N definitions or spill to make
- // room for them.
- limit(PE.ActiveSet, PE.SpillSet, Pred->end(), ReloadInPred);
- for (auto R : ReloadInPred) {
- reloadAtEnd(*Pred, R);
- // FIXME: Do we need to update sets?
- PE.ActiveSet.insert(R);
+ if (!ReloadInPred.empty()) {
+ // We're about to insert N reloads at the end of the predecessor block.
+ // Make sure we have enough registers for N definitions or spill to make
+ // room for them.
+ limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->end(), ReloadInPred);
+ for (auto R : ReloadInPred) {
+ reloadAtEnd(*Pred, R);
+ // FIXME: Do we need to update sets?
+ PE.ActiveSet.insert(R);
+ }
}
for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
@@ -380,26 +390,43 @@ unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
return MaxRP;
}
-void AMDGPUSSASpiller::limit(RegisterSet &Active, RegisterSet &Spilled,
- MachineBasicBlock::iterator I, const RegisterSet Defs) {
+void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
+ RegisterSet &Spilled,
+ MachineBasicBlock::iterator I,
+ const RegisterSet Defs) {
MachineBasicBlock::iterator LimitPoint = I;
if (!Defs.empty()) {
- LimitPoint++;
+ if (LimitPoint != MBB.end())
+ LimitPoint = std::next(LimitPoint);
}
unsigned CurRP = getSizeInRegs(Active);
- if(CurRP < NumAvailableRegs - Defs.size())
+ unsigned DefsRP = getSizeInRegs(Defs);
+ if(CurRP <= NumAvailableRegs - DefsRP)
return;
- unsigned Limit = CurRP - NumAvailableRegs + Defs.size();
+ if (LimitPoint == MBB.end())
+ NU.getSortedForBlockEnd(MBB, Active);
+ else
+ NU.getSortedForInstruction(*LimitPoint, Active);
- NU.getSortedForInstruction(*LimitPoint, Active);
- RegisterSet Tmp(Active.end() - Limit, Active.end());
- Active.set_subtract(Tmp);
- Tmp.set_subtract(Spilled);
- for (auto R : Tmp) {
- if (!NU.isDead(*I, R)) {
+
+ unsigned ShrinkTo = NumAvailableRegs - DefsRP;
+ RegisterSet ToSpill;
+ while (CurRP > ShrinkTo) {
+ auto R = Active.pop_back_val();
+ unsigned RegSize = getSizeInRegs(R);
+ CurRP -= RegSize;
+ if (!Spilled.contains(R))
+ ToSpill.insert(R);
+ }
+
+ for (auto R : ToSpill) {
+
+ bool Alive = (LimitPoint == MBB.end()) ? !NU.isDead(MBB, R)
+ : !NU.isDead(*LimitPoint, R);
+ if (Alive) {
spillBefore(R, I);
Spilled.insert(R);
}
@@ -426,8 +453,11 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Size = getSizeInRegs(Active);
NU.getSortedForInstruction(*MBB.instr_begin(), S);
for (auto VReg : S) {
- if (Size + getSizeInRegs(VReg) < Limit)
- Active.insert(VReg);
+ unsigned RSize = getSizeInRegs(VReg);
+ if (Size + RSize < Limit) {
+ Active.insert(VReg);
+ Size += RSize;
+ }
}
return Size;
}
>From e396cc7cf11ed118b9bbc43c627c859c2cf1cc6a Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Wed, 18 Dec 2024 18:04:03 +0100
Subject: [PATCH 04/46] SSA Spiller. Timers for analysis of compile time
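The diff below wires explicit TimerGroup/Timer objects around the spiller's phases. A minimal sketch of that pattern with illustrative names; the actual timers and phases are those added in the patch itself.

#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: a timer group with one named timer around a phase of
// interest, printed to stderr when done.
void timedPhase() {
  llvm::TimerGroup TG("ssa-spiller", "SSA Spiller timing");
  llvm::Timer Limit("limit", "Time spent in limit()", TG);

  Limit.startTimer();
  // ... phase to be measured ...
  Limit.stopTimer();

  TG.print(llvm::errs());
}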
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 20 ++++++-
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 6 ++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 57 ++++++++++++++++---
3 files changed, 73 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index a7c2c76b4d76d..28c238bad4b2e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -13,6 +13,7 @@
#include "llvm/Passes/PassPlugin.h"
#include "llvm/InitializePasses.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/Timer.h"
#include "AMDGPU.h"
@@ -26,7 +27,11 @@ using namespace llvm;
void NextUseResult::init(const MachineFunction &MF) {
-
+ TG = new TimerGroup("Next Use Analysis",
+ "Compilation Timers for Next Use Analysis");
+ T1 = new Timer("Next Use Analysis", "Time spent in analyze()", *TG);
+ T2 = new Timer("Next Use Analysis", "Time spent in computeNextUseDistance()",
+ *TG);
for (auto L : LI->getLoopsInPreorder()) {
SmallVector<MachineBasicBlock *> Exiting;
L->getExitingBlocks(Exiting);
@@ -41,6 +46,9 @@ void NextUseResult::init(const MachineFunction &MF) {
}
void NextUseResult::analyze(const MachineFunction &MF) {
+
+ T1->startTimer();
+ dbgs() << "Next Use Analysis start\n";
bool Changed = true;
while(Changed) {
Changed = false;
@@ -91,13 +99,15 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
}
VRegDistances &Next = NextUseMap[MBB->getNumber()];
- dbgs() << "MBB_" << MBB->getNumber() << "\n";
- printVregDistancesD(Next);
+ // dbgs() << "MBB_" << MBB->getNumber() << "\n";
+ // printVregDistancesD(Next);
bool Changed4MBB = diff(Prev, Next);
Changed |= Changed4MBB;
}
}
+ T1->stopTimer();
+ TG->print(llvm::errs());
}
unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI,
@@ -123,6 +133,8 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
const SlotIndex I,
Register VReg) {
+ T2->startTimer();
+
unsigned Dist = Infinity;
SlotIndex Begin = Indexes->getMBBStartIdx(MBB.getNumber());
@@ -235,6 +247,8 @@ unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
if (Dist != Infinity)
InstrCache[&I][VReg] = Dist;
}
+ T2->stopTimer();
+ TG->print(llvm::errs());
return Dist;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 849ef3d2ad5b1..f033d7f1779a7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -30,6 +30,12 @@ class NextUseResult {
const SIRegisterInfo *TRI;
MachineLoopInfo *LI;
DenseMap<const SlotIndex *, DenseMap<Register, unsigned>> InstrCache;
+
+ TimerGroup *TG;
+ Timer *T1;
+ Timer *T2;
+
+
public:
using VRegDistances = DenseMap<Register, unsigned>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index cf53422a7717e..03a0224716265 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -9,6 +9,7 @@
#include "llvm/Pass.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/Timer.h"
#include "llvm/Target/TargetMachine.h"
#include "AMDGPUNextUseAnalysis.h"
@@ -39,6 +40,12 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
const GCNSubtarget *ST;
MachineFrameInfo *MFI;
+ TimerGroup *TG;
+ Timer *T1;
+ Timer *T2;
+ Timer *T3;
+ Timer *T4;
+
using RegisterSet = SetVector<Register>;
struct SpillInfo {
@@ -75,6 +82,12 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
MFI = &MF.getFrameInfo();
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
+ TG = new TimerGroup("SSA Spiller Timing", "Time spent in different parts of the SSA Spiller");
+ T1 = new Timer("General time", "ProcessFunction", *TG);
+ T2 = new Timer("Limit", "Time spent in limit()", *TG);
+ T3 = new Timer("Initialization time", "Init Active Sets", *TG);
+ T4 = new Timer("Instruction processing time", "Process Instruction w/o limit", *TG);
+
NumAvailableRegs =
IsVGPRsPass ? ST->getMaxNumVGPRs(MF) : ST->getMaxNumSGPRs(MF);
// ? TRI->getRegPressureSetLimit(
@@ -118,6 +131,13 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
AMDGPUSSASpiller(const LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
: LIS(LIS), LI(LI), MDT(MDT), NU(NU) {}
+ ~AMDGPUSSASpiller() {
+ delete T1;
+ delete T2;
+ delete T3;
+ delete T4;
+ delete TG;
+ }
bool run(MachineFunction &MF);
};
@@ -130,15 +150,20 @@ AMDGPUSSASpiller::getBlockInfo(const MachineBasicBlock &MBB) {
void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
+
+ T1->startTimer();
for (auto MBB : RPOT) {
+
+ T3->startTimer();
if (LI.isLoopHeader(MBB)) {
initActiveSetLoopHeader(*MBB);
} else {
initActiveSetUsualBlock(*MBB);
}
connectToPredecessors(*MBB);
+ T3->stopTimer();
processBlock(*MBB);
- dump();
+ // dump();
// We process loop blocks twice: once with Spill/Active sets of
// loop latch blocks unknown, and then again as soon as the latch blocks
// sets are computed.
@@ -152,6 +177,7 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
PostponedLoopLatches.erase(MBB->getNumber());
}
}
+ T1->stopTimer();
}
void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
@@ -161,6 +187,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (MachineBasicBlock::iterator I : MBB) {
RegisterSet Reloads;
+ T4->startTimer();
for (auto U : I->uses()) {
if (!U.isReg())
continue;
@@ -189,17 +216,23 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Defs.insert(D.getReg());
}
- if (Reloads.empty() && Defs.empty())
+ if (Reloads.empty() && Defs.empty()) {
+ T4->stopTimer();
continue;
-
+ }
+ T4->stopTimer();
+
limit(MBB, Active, Spilled, I);
limit(MBB, Active, Spilled, I, Defs);
+
+ T4->startTimer();
// FIXME: limit with Defs is assumed to create room for the registers being
// defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
Active.insert(Defs.begin(), Defs.end());
// Add reloads for VRegs in Reloads before I
for (auto R : Reloads)
reloadBefore(R, I);
+ T4->stopTimer();
}
// Now, clear dead registers.
RegisterSet Deads;
@@ -242,9 +275,9 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
for (auto Pred : Preds) {
- dumpRegSet(getBlockInfo(*Pred).SpillSet);
+ // dumpRegSet(getBlockInfo(*Pred).SpillSet);
Entry.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
- dumpRegSet(Entry.SpillSet);
+ // dumpRegSet(Entry.SpillSet);
}
set_intersect(Entry.SpillSet, Entry.ActiveSet);
for (auto Pred : Preds) {
@@ -394,6 +427,7 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
RegisterSet &Spilled,
MachineBasicBlock::iterator I,
const RegisterSet Defs) {
+ //T2->startTimer();
MachineBasicBlock::iterator LimitPoint = I;
if (!Defs.empty()) {
@@ -403,13 +437,18 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned CurRP = getSizeInRegs(Active);
unsigned DefsRP = getSizeInRegs(Defs);
- if(CurRP <= NumAvailableRegs - DefsRP)
+ if(CurRP <= NumAvailableRegs - DefsRP) {
+ //T2->stopTimer();
return;
+ }
if (LimitPoint == MBB.end())
NU.getSortedForBlockEnd(MBB, Active);
- else
+ else {
+ T2->startTimer();
NU.getSortedForInstruction(*LimitPoint, Active);
+ T2->stopTimer();
+ }
unsigned ShrinkTo = NumAvailableRegs - DefsRP;
@@ -431,6 +470,7 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
Spilled.insert(R);
}
}
+ //T2->stopTimer();
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const Register VReg) {
@@ -466,7 +506,10 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
init(MF, false);
processFunction(MF);
init(MF, true);
+
processFunction(MF);
+ dbgs() << "SSA Spiller end\n";
+ TG->print(llvm::errs());
return false;
}
} // namespace
>From 896556f7ecb432cdd96bf5b327d4bd9c84992d92 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Wed, 18 Dec 2024 18:13:22 +0100
Subject: [PATCH 05/46] SSA Spiller WIP 18.12
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 44 ++++++++++++-------
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 15 ++++++-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 17 ++++++-
3 files changed, 56 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 28c238bad4b2e..706570b1d818f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -74,29 +74,39 @@ void NextUseResult::analyze(const MachineFunction &MF) {
mergeDistances(Curr, SuccMapRef.value(), Weight);
}
}
- unsigned MBBLen =
- Begin.distance(Indexes->getMBBEndIdx(MBB)) / SlotIndex::InstrDist;
- for (auto &P : Curr) {
- P.second += MBBLen;
- }
-
- NextUseMap[MBB->getNumber()] = std::move(Curr);
+ // unsigned MBBLen =
+ // Begin.distance(Indexes->getMBBEndIdx(MBB)) / SlotIndex::InstrDist;
+ // for (auto &P : Curr) {
+ // P.second += MBBLen;
+ // }
+ // NextUseMap[MBB->getNumber()] = std::move(Curr);
+
+ DenseMap<Register, unsigned> CurrentBlockRegUses;
+ unsigned CurrDist = 0;
for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
for (auto &MO : MI.operands()) {
- if (MO.isReg() && MO.getReg().isVirtual() && MO.isUse()) {
+ if (MO.isReg() && MO.getReg().isVirtual()) {
Register VReg = MO.getReg();
- MachineInstr *Def = MRI->getVRegDef(VReg);
- if (Def && Def->getParent() == MBB)
- // defined in block - skip it
- continue;
- unsigned Distance =
- Begin.distance(Indexes->getInstructionIndex(MI)) /
- SlotIndex::InstrDist;
- setNextUseDistance(MBB, VReg, Distance);
- UsedInBlock[MBB->getNumber()].insert(VReg);
+ // MachineInstr *Def = MRI->getVRegDef(VReg);
+ // if (Def && Def->getParent() == MBB)
+ // // defined in block - skip it
+ // continue;
+ // unsigned Distance =
+ // Begin.distance(Indexes->getInstructionIndex(MI)) /
+ // SlotIndex::InstrDist;
+ // setNextUseDistance(MBB, VReg, Distance);
+ if(MO.isUse()) {
+ CurrentBlockRegUses[VReg] = CurrDist;
+ UsedInBlock[MBB->getNumber()].insert(VReg);
+ } else if (MO.isDef()) {
+ if (CurrentBlockRegUses.contains(VReg))
+ CurrentBlockRegUses.erase(VReg);
+ }
}
}
+ InstrCache[&MI] = std::move(CurrentBlockRegUses);
+ CurrDist++;
}
VRegDistances &Next = NextUseMap[MBB->getNumber()];
// dbgs() << "MBB_" << MBB->getNumber() << "\n";
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index f033d7f1779a7..e2f77490d33eb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -29,13 +29,14 @@ class NextUseResult {
const MachineRegisterInfo *MRI;
const SIRegisterInfo *TRI;
MachineLoopInfo *LI;
- DenseMap<const SlotIndex *, DenseMap<Register, unsigned>> InstrCache;
TimerGroup *TG;
Timer *T1;
Timer *T2;
+ DenseMap<const MachineInstr *, DenseMap<Register, unsigned>> InstrCache;
+
public:
using VRegDistances = DenseMap<Register, unsigned>;
@@ -160,6 +161,18 @@ class NextUseResult {
void getSortedForInstruction(const MachineInstr &MI,
SetVector<Register> &Regs) {
+ // auto SortByDist = [&](const Register LHS, const Register RHS) {
+ // unsigned LDist = getNextUseDistance(MI, LHS);
+ // unsigned RDist = getNextUseDistance(MI, RHS);
+ // if (LDist == RDist) {
+ // const TargetRegisterClass *LRC = TRI->getRegClassForReg(*MRI, LHS);
+ // unsigned LSize = TRI->getRegClassWeight(LRC).RegWeight;
+ // const TargetRegisterClass *RRC = TRI->getRegClassForReg(*MRI, RHS);
+ // unsigned RSize = TRI->getRegClassWeight(RRC).RegWeight;
+ // return LSize < RSize;
+ // }
+ // return LDist < RDist;
+ // };
auto SortByDist = [&](const Register LHS, const Register RHS) {
return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 03a0224716265..96bebeaaffe4f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -275,9 +275,9 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
for (auto Pred : Preds) {
- // dumpRegSet(getBlockInfo(*Pred).SpillSet);
+ //dumpRegSet(getBlockInfo(*Pred).SpillSet);
Entry.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
- // dumpRegSet(Entry.SpillSet);
+ //dumpRegSet(Entry.SpillSet);
}
set_intersect(Entry.SpillSet, Entry.ActiveSet);
for (auto Pred : Preds) {
@@ -453,6 +453,19 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned ShrinkTo = NumAvailableRegs - DefsRP;
RegisterSet ToSpill;
+
+ // unsigned SizeToSpill = CurRP - ShrinkTo;
+ // for (auto R : reverse(Active)) {
+ // if (SizeToSpill == 0)
+ // break;
+ // unsigned RegSize = getSizeInRegs(R);
+ // if (RegSize <= SizeToSpill) {
+ // ToSpill.insert(R);
+ // SizeToSpill -= RegSize;
+ // }
+ // }
+ // Active.set_subtract(ToSpill);
+
while (CurRP > ShrinkTo) {
auto R = Active.pop_back_val();
unsigned RegSize = getSizeInRegs(R);
>From 2930ced0a19f4f18deec8588cfe7656adbbdbb96 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Thu, 19 Dec 2024 14:11:35 +0100
Subject: [PATCH 06/46] SSA Spiller compile time issue solved
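The fix visible in the diff below avoids calling the next-use query from inside the sort comparator: each register's distance is looked up once, cached, and the sort then compares the cached keys. A minimal sketch of that pattern, with an illustrative callback standing in for the query; names are not the patch interface.

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"

// Sketch only: sort by a precomputed key instead of re-querying the
// (potentially expensive) distance function inside the comparator.
void sortByCachedDistance(llvm::SmallVectorImpl<llvm::Register> &Regs,
                          llvm::function_ref<unsigned(llvm::Register)> Dist) {
  llvm::DenseMap<llvm::Register, unsigned> Key;
  for (llvm::Register R : Regs)
    Key[R] = Dist(R); // one query per register
  llvm::sort(Regs, [&](llvm::Register L, llvm::Register R) {
    return Key.lookup(L) < Key.lookup(R); // cheap comparator
  });
}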
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 6 +--
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 40 ++++++++++---------
2 files changed, 25 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 706570b1d818f..c60eb9bf83337 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -143,7 +143,7 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
const SlotIndex I,
Register VReg) {
- T2->startTimer();
+ //T2->startTimer();
unsigned Dist = Infinity;
@@ -257,8 +257,8 @@ unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
if (Dist != Infinity)
InstrCache[&I][VReg] = Dist;
}
- T2->stopTimer();
- TG->print(llvm::errs());
+ //T2->stopTimer();
+ //TG->print(llvm::errs());
return Dist;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 96bebeaaffe4f..c35c1fe1f0683 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -427,7 +427,6 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
RegisterSet &Spilled,
MachineBasicBlock::iterator I,
const RegisterSet Defs) {
- //T2->startTimer();
MachineBasicBlock::iterator LimitPoint = I;
if (!Defs.empty()) {
@@ -435,20 +434,31 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
LimitPoint = std::next(LimitPoint);
}
+ Active.remove_if([&](Register R) {
+ return (LimitPoint == MBB.end()) ? NU.isDead(MBB, R)
+ : NU.isDead(*LimitPoint, R);
+ });
+
unsigned CurRP = getSizeInRegs(Active);
unsigned DefsRP = getSizeInRegs(Defs);
- if(CurRP <= NumAvailableRegs - DefsRP) {
- //T2->stopTimer();
+ if (CurRP <= NumAvailableRegs - DefsRP)
return;
- }
- if (LimitPoint == MBB.end())
- NU.getSortedForBlockEnd(MBB, Active);
- else {
- T2->startTimer();
- NU.getSortedForInstruction(*LimitPoint, Active);
- T2->stopTimer();
+ //T2->startTimer();
+ DenseMap<Register, unsigned> M;
+ for (auto R : Active) {
+ unsigned D = (LimitPoint == MBB.end())
+ ? NU.getNextUseDistance(MBB, R)
+ : NU.getNextUseDistance(*LimitPoint, R);
+ M[R] = D;
}
+ auto SortByDist = [&](const Register LHS, const Register RHS) {
+ return M[LHS] < M[RHS];
+ };
+ SmallVector<Register> Tmp(Active.takeVector());
+ sort(Tmp, SortByDist);
+ Active.insert(Tmp.begin(), Tmp.end());
+ //T2->stopTimer();
unsigned ShrinkTo = NumAvailableRegs - DefsRP;
@@ -475,15 +485,9 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
}
for (auto R : ToSpill) {
-
- bool Alive = (LimitPoint == MBB.end()) ? !NU.isDead(MBB, R)
- : !NU.isDead(*LimitPoint, R);
- if (Alive) {
- spillBefore(R, I);
- Spilled.insert(R);
- }
+ spillBefore(R, I);
+ Spilled.insert(R);
}
- //T2->stopTimer();
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const Register VReg) {
>From 31ce65399e10ab724fae2d2cb018e689df925fb7 Mon Sep 17 00:00:00 2001
From: Alexander Timofeev <alexander.timofeev at amd.com>
Date: Sat, 21 Dec 2024 01:27:22 +0100
Subject: [PATCH 07/46] SSA Spiller. Next Use Analysis reimplemented to map
each instruction to RegSet
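The reworked analysis below keeps, per basic block, the next-use distances both at the block bottom and for every instruction, so queries become plain map lookups. A compressed sketch of that data shape, mirroring the NextUseInfo class added in the header diff; the enclosing namespace is illustrative only.

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

namespace sketch {
using VRegDistances = llvm::DenseMap<llvm::Register, unsigned>;

// Per-block result: distances at the block end plus a snapshot taken at
// every instruction while walking the block bottom-up.
struct NextUseInfo {
  VRegDistances Bottom;
  llvm::DenseMap<const llvm::MachineInstr *, VRegDistances> InstrDist;
};

// Keyed by MachineBasicBlock number, as in the patch.
using NextUseMap = llvm::DenseMap<unsigned, NextUseInfo>;
} // namespace sketch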
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 354 +++++++++---------
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 201 +++++-----
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 137 ++++---
3 files changed, 358 insertions(+), 334 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index c60eb9bf83337..1ceebc132fc6f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -46,24 +46,28 @@ void NextUseResult::init(const MachineFunction &MF) {
}
void NextUseResult::analyze(const MachineFunction &MF) {
-
+ // Upward-exposed distances are only necessary to convey the data flow from
+ // the block to its predecessors. There is no need to store them beyond the
+ // analyze function, as the analysis users are only interested in use
+ // distances relative to a given MI or a given block end.
+ DenseMap<unsigned, VRegDistances> UpwardNextUses;
T1->startTimer();
- dbgs() << "Next Use Analysis start\n";
bool Changed = true;
while(Changed) {
Changed = false;
for (auto MBB : post_order(&MF)) {
- SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
+ unsigned MBBNum = MBB->getNumber();
VRegDistances Curr, Prev;
- if (auto CurrMapRef = getVRegMap(MBB)) {
- Prev = CurrMapRef.value();
+ if (UpwardNextUses.contains(MBBNum)) {
+ Prev = UpwardNextUses[MBBNum];
}
for (auto Succ : successors(MBB)) {
- auto SuccMapRef = getVRegMap(Succ);
+ unsigned SuccNum = Succ->getNumber();
- if (SuccMapRef) {
+ if (UpwardNextUses.contains(SuccNum)) {
+ VRegDistances SuccDist = UpwardNextUses[SuccNum];
// Check if the edge from MBB to Succ goes out of the Loop
unsigned Weight = 0;
if (EdgeWeigths.contains(MBB->getNumber())) {
@@ -71,47 +75,36 @@ void NextUseResult::analyze(const MachineFunction &MF) {
if (Succ->getNumber() == SuccNum)
Weight = Infinity;
}
- mergeDistances(Curr, SuccMapRef.value(), Weight);
+ mergeDistances(Curr, SuccDist, Weight);
}
}
- // unsigned MBBLen =
- // Begin.distance(Indexes->getMBBEndIdx(MBB)) / SlotIndex::InstrDist;
- // for (auto &P : Curr) {
- // P.second += MBBLen;
- // }
-
- // NextUseMap[MBB->getNumber()] = std::move(Curr);
-
- DenseMap<Register, unsigned> CurrentBlockRegUses;
- unsigned CurrDist = 0;
+
+ NextUseMap[MBBNum].Bottom = Curr;
+
for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
+
+ for (auto &P : Curr) {
+ P.second++;
+ }
+
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.getReg().isVirtual()) {
Register VReg = MO.getReg();
- // MachineInstr *Def = MRI->getVRegDef(VReg);
- // if (Def && Def->getParent() == MBB)
- // // defined in block - skip it
- // continue;
- // unsigned Distance =
- // Begin.distance(Indexes->getInstructionIndex(MI)) /
- // SlotIndex::InstrDist;
- // setNextUseDistance(MBB, VReg, Distance);
if(MO.isUse()) {
- CurrentBlockRegUses[VReg] = CurrDist;
+ Curr[VReg] = 0;
UsedInBlock[MBB->getNumber()].insert(VReg);
} else if (MO.isDef()) {
- if (CurrentBlockRegUses.contains(&MI))
- CurrentBlockRegUses.erase(VReg);
+ Curr.erase(VReg);
}
}
}
- InstrCache[&MI] = std::move(CurrentBlockRegUses);
- CurrDist++;
+ NextUseMap[MBBNum].InstrDist[&MI] = Curr;
+ // printVregDistancesD(Curr);
}
- VRegDistances &Next = NextUseMap[MBB->getNumber()];
- // dbgs() << "MBB_" << MBB->getNumber() << "\n";
- // printVregDistancesD(Next);
- bool Changed4MBB = diff(Prev, Next);
+
+ UpwardNextUses[MBBNum] = std::move(Curr);
+
+ bool Changed4MBB = diff(Prev, UpwardNextUses[MBBNum]);
Changed |= Changed4MBB;
}
@@ -120,148 +113,169 @@ void NextUseResult::analyze(const MachineFunction &MF) {
TG->print(llvm::errs());
}
-unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI,
+unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const Register VReg) {
- SlotIndex Idx = Indexes->getInstructionIndex(MI);
- assert(Idx.isValid() && "Invalid Instruction index!");
- if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
- return InstrCache[&Idx][VReg];
- }
- return computeNextUseDistance(*MI.getParent(), Idx, VReg);
+ unsigned Dist = Infinity;
+ const MachineBasicBlock *MBB = I->getParent();
+ unsigned MBBNum = MBB->getNumber();
+ if (NextUseMap.contains(MBBNum) &&
+ NextUseMap[MBBNum].InstrDist.contains(&*I) &&
+ NextUseMap[MBBNum].InstrDist[&*I].contains(VReg))
+ Dist = NextUseMap[MBBNum].InstrDist[&*I][VReg];
+ return Dist;
}
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
- Register VReg) {
- SlotIndex Idx = Indexes->getMBBEndIdx(&MBB);
- assert(Idx.isValid() && "Invalid Instruction index!");
- if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
- return InstrCache[&Idx][VReg];
- }
- return computeNextUseDistance(MBB, Idx, VReg);
-}
-
-unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
- const SlotIndex I,
- Register VReg) {
- //T2->startTimer();
-
+ const Register VReg) {
unsigned Dist = Infinity;
-
- SlotIndex Begin = Indexes->getMBBStartIdx(MBB.getNumber());
-
- int IDist = Begin.distance(I)/SlotIndex::InstrDist;
- if (auto VMapRef = getVRegMap(&MBB)) {
- VRegDistances &VRegs = VMapRef.value();
- if (VRegs.contains(VReg)) {
- int UseDist = VRegs[VReg];
- if ((UseDist - IDist) < 0) {
-
- // FIXME: VRegs contains only upward exposed info! In other words - the
- // very first use in block!
- // (UseDist - IDist) < 0 just means that our MI is later then the 1st
- // use of the VReg.
- // Function user (calls from outside: from SSASpiller) is interested in
- // the next use in block after the MI!
- // We need to scan for the uses in current block - from MI to the block
- // end BEFORE checking the Succs!
-
- // NOTE: Make sure that we don't spoil the info for Next Use analysis
- // itself. If so, we need 2 different functions for querying
- // nextUseDistance!
- bool Done = false;
- MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
- if (Instr) {
- // we canot use SlotIndexes to compare positions because
- // spills/reloads were not added in Instruction Index. So, just scan
- // the BB.
- unsigned D = 0;
- MachineBasicBlock::iterator It(Instr);
- while (It != MBB.end()) {
- if (It->definesRegister(VReg, TRI)) {
- // VReg is DEAD
- Dist = Infinity;
- Done = true;
- break;
- }
- if (It->readsRegister(VReg, TRI)) {
- Dist = D;
- Done = true;
- break;
- }
- D++;
- It++;
- }
- }
- if (!Done)
- // The instruction of interest is after the first use of the register
- // in the block and the register has not been killed in block. Look
- // for the next use in successors.
- for (auto Succ : successors(&MBB)) {
- if (auto SuccVMapRef = getVRegMap(Succ)) {
- VRegDistances &SuccVRegs = SuccVMapRef.value();
- if (SuccVRegs.contains(VReg)) {
- Dist = std::min(Dist, SuccVRegs[VReg]);
- }
- }
- }
- } else {
- Dist = UseDist - IDist;
- }
- } else {
- // We hit a case when the VReg is defined and used inside the block.
- // Let's see if I is in between. Since we may be called from the broken
- // SSA function we cannot rely on MRI.getVRegDef. The VReg Def in block
- // may be reload, so we canot use SlotIndexes to compare positions because
- // spills/reloads were not added in Instruction Index. So, just scan the
- // BB.
- MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
- if (Instr) {
- bool DefSeen = false, InstrSeen = false;
- unsigned D = 0;
- for (auto &MI : MBB) {
- if (InstrSeen)
- D++;
- if (Instr == &MI) {
- if (!DefSeen)
- break;
- InstrSeen = true;
- }
-
- if (MI.definesRegister(VReg, TRI))
- DefSeen = true;
- if (MI.readsRegister(VReg, TRI) && InstrSeen) {
- Dist = D;
- break;
- }
- }
- }
-
- // MachineInstr *Def = MRI->getVRegDef(VReg);
- // assert(Def && "Neither use distance no Def found for reg!");
- // SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
- // assert(DefIdx.isValid() && "Register Def not in the Index");
- // if (SlotIndex::isEarlierInstr(DefIdx, I)) {
- // // "I" is after the Def
- // for (auto &U : MRI->use_instructions(VReg)) {
- // assert(U.getParent() == &MBB &&
- // "Use out of the block fount but distance was not recorded");
- // SlotIndex UIdx = Indexes->getInstructionIndex(U);
- // if (SlotIndex::isEarlierInstr(I, UIdx)) {
- // unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
- // if (UDist < Dist)
- // Dist = UDist;
- // }
- // }
- // }
- }
- if (Dist != Infinity)
- InstrCache[&I][VReg] = Dist;
- }
- //T2->stopTimer();
- //TG->print(llvm::errs());
+ unsigned MBBNum = MBB.getNumber();
+ if (NextUseMap.contains(MBBNum))
+ Dist = NextUseMap[MBBNum].Bottom[VReg];
return Dist;
}
+// unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI,
+// const Register VReg) {
+// SlotIndex Idx = Indexes->getInstructionIndex(MI);
+// assert(Idx.isValid() && "Invalid Instruction index!");
+// if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
+// return InstrCache[&Idx][VReg];
+// }
+// return computeNextUseDistance(*MI.getParent(), Idx, VReg);
+// }
+
+// unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
+// Register VReg) {
+// SlotIndex Idx = Indexes->getMBBEndIdx(&MBB);
+// assert(Idx.isValid() && "Invalid Instruction index!");
+// if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
+// return InstrCache[&Idx][VReg];
+// }
+// return computeNextUseDistance(MBB, Idx, VReg);
+// }
+
+// unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
+// const SlotIndex I,
+// Register VReg) {
+// //T2->startTimer();
+
+// unsigned Dist = Infinity;
+
+// SlotIndex Begin = Indexes->getMBBStartIdx(MBB.getNumber());
+
+// int IDist = Begin.distance(I)/SlotIndex::InstrDist;
+// if (auto VMapRef = getVRegMap(&MBB)) {
+// VRegDistances &VRegs = VMapRef.value();
+// if (VRegs.contains(VReg)) {
+// int UseDist = VRegs[VReg];
+// if ((UseDist - IDist) < 0) {
+
+// // FIXME: VRegs contains only upward exposed info! In other words - the
+// // very first use in block!
+// // (UseDist - IDist) < 0 just means that our MI is later then the 1st
+// // use of the VReg.
+// // Function user (calls from outside: from SSASpiller) is interested in
+// // the next use in block after the MI!
+// // We need to scan for the uses in current block - from MI to the block
+// // end BEFORE checking the Succs!
+
+// // NOTE: Make sure that we don't spoil the info for Next Use analysis
+// // itself. If so, we need 2 different functions for querying
+// // nextUseDistance!
+// bool Done = false;
+// MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
+// if (Instr) {
+// // we canot use SlotIndexes to compare positions because
+// // spills/reloads were not added in Instruction Index. So, just scan
+// // the BB.
+// unsigned D = 0;
+// MachineBasicBlock::iterator It(Instr);
+// while (It != MBB.end()) {
+// if (It->definesRegister(VReg, TRI)) {
+// // VReg is DEAD
+// Dist = Infinity;
+// Done = true;
+// break;
+// }
+// if (It->readsRegister(VReg, TRI)) {
+// Dist = D;
+// Done = true;
+// break;
+// }
+// D++;
+// It++;
+// }
+// }
+// if (!Done)
+// // The instruction of interest is after the first use of the register
+// // in the block and the register has not been killed in block. Look
+// // for the next use in successors.
+// for (auto Succ : successors(&MBB)) {
+// if (auto SuccVMapRef = getVRegMap(Succ)) {
+// VRegDistances &SuccVRegs = SuccVMapRef.value();
+// if (SuccVRegs.contains(VReg)) {
+// Dist = std::min(Dist, SuccVRegs[VReg]);
+// }
+// }
+// }
+// } else {
+// Dist = UseDist - IDist;
+// }
+// } else {
+// // We hit a case when the VReg is defined and used inside the block.
+// // Let's see if I is in between. Since we may be called from the broken
+// // SSA function we cannot rely on MRI.getVRegDef. The VReg Def in block
+// // may be reload, so we canot use SlotIndexes to compare positions because
+// // spills/reloads were not added in Instruction Index. So, just scan the
+// // BB.
+// MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
+// if (Instr) {
+// bool DefSeen = false, InstrSeen = false;
+// unsigned D = 0;
+// for (auto &MI : MBB) {
+// if (InstrSeen)
+// D++;
+// if (Instr == &MI) {
+// if (!DefSeen)
+// break;
+// InstrSeen = true;
+// }
+
+// if (MI.definesRegister(VReg, TRI))
+// DefSeen = true;
+// if (MI.readsRegister(VReg, TRI) && InstrSeen) {
+// Dist = D;
+// break;
+// }
+// }
+// }
+
+// // MachineInstr *Def = MRI->getVRegDef(VReg);
+// // assert(Def && "Neither use distance no Def found for reg!");
+// // SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
+// // assert(DefIdx.isValid() && "Register Def not in the Index");
+// // if (SlotIndex::isEarlierInstr(DefIdx, I)) {
+// // // "I" is after the Def
+// // for (auto &U : MRI->use_instructions(VReg)) {
+// // assert(U.getParent() == &MBB &&
+// // "Use out of the block fount but distance was not recorded");
+// // SlotIndex UIdx = Indexes->getInstructionIndex(U);
+// // if (SlotIndex::isEarlierInstr(I, UIdx)) {
+// // unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
+// // if (UDist < Dist)
+// // Dist = UDist;
+// // }
+// // }
+// // }
+// }
+// if (Dist != Infinity)
+// InstrCache[&I][VReg] = Dist;
+// }
+// //T2->stopTimer();
+// //TG->print(llvm::errs());
+// return Dist;
+// }
+
AMDGPUNextUseAnalysis::Result
AMDGPUNextUseAnalysis::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
@@ -303,7 +317,7 @@ bool AMDGPUNextUseAnalysisWrapper::runOnMachineFunction(
assert(NU.MRI->isSSA());
NU.init(MF);
NU.analyze(MF);
- LLVM_DEBUG(NU.dump());
+// LLVM_DEBUG(NU.dump());
return false;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index e2f77490d33eb..6b81c489e2fe0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -33,15 +33,23 @@ class NextUseResult {
TimerGroup *TG;
Timer *T1;
Timer *T2;
+
+ using VRegDistances = DenseMap<Register, unsigned>;
+ class NextUseInfo {
+ // FIXME: need to elaborate proper class interface!
+ public:
+ VRegDistances Bottom;
+ DenseMap<const MachineInstr *, VRegDistances> InstrDist;
+ };
- DenseMap<const MachineInstr *, DenseMap<Register, unsigned>> InstrCache;
+ DenseMap<unsigned, NextUseInfo> NextUseMap;
public:
- using VRegDistances = DenseMap<Register, unsigned>;
+
private:
- DenseMap<unsigned, VRegDistances> NextUseMap;
+ //DenseMap<unsigned, VRegDistances> NextUseMap;
DenseMap<unsigned, SetVector<Register>> UsedInBlock;
DenseMap<int, int> EdgeWeigths;
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
@@ -75,20 +83,20 @@ class NextUseResult {
}
}
- void dump(raw_ostream &O = dbgs()) const {
- for (auto P : NextUseMap) {
- O << "\nMBB_" << P.first << "\n";
- printVregDistances(P.second, O);
- }
- }
-
- std::optional<std::reference_wrapper<VRegDistances>>
- getVRegMap(const MachineBasicBlock *MBB) {
- if (NextUseMap.contains(MBB->getNumber())) {
- return NextUseMap[MBB->getNumber()];
- }
- return std::nullopt;
- }
+ // void dump(raw_ostream &O = dbgs()) const {
+ // for (auto P : NextUseMap) {
+ // O << "\nMBB_" << P.first << "\n";
+ // printVregDistances(P.second, O);
+ // }
+ // }
+
+ // std::optional<std::reference_wrapper<VRegDistances>>
+ // getVRegMap(const MachineBasicBlock *MBB) {
+ // if (NextUseMap.contains(MBB->getNumber())) {
+ // return NextUseMap[MBB->getNumber()];
+ // }
+ // return std::nullopt;
+ // }
VRegDistances &mergeDistances(VRegDistances &LHS, const VRegDistances &RHS,
unsigned Weight = 0) {
@@ -106,17 +114,17 @@ class NextUseResult {
return LHS;
}
- void setNextUseDistance(const MachineBasicBlock *MBB, Register VReg,
- int Distance) {
- auto VMapRef = getVRegMap(MBB);
- if (!VMapRef)
- VMapRef = NextUseMap[MBB->getNumber()];
- VRegDistances &VRegs = VMapRef.value();
- VRegs[VReg] = Distance;
- }
+ // void setNextUseDistance(const MachineBasicBlock *MBB, Register VReg,
+ // int Distance) {
+ // auto VMapRef = getVRegMap(MBB);
+ // if (!VMapRef)
+ // VMapRef = NextUseMap[MBB->getNumber()];
+ // VRegDistances &VRegs = VMapRef.value();
+ // VRegs[VReg] = Distance;
+ // }
- unsigned computeNextUseDistance(const MachineBasicBlock &MBB,
- const SlotIndex I, Register Vreg);
+ // unsigned computeNextUseDistance(const MachineBasicBlock &MBB,
+ // const SlotIndex I, Register Vreg);
void clear() {
NextUseMap.clear();
@@ -132,72 +140,83 @@ class NextUseResult {
}
~NextUseResult() { clear(); }
- void print(raw_ostream &O) const { dump(O); }
+ // void print(raw_ostream &O) const { dump(O); }
- unsigned getNextUseDistance(const MachineInstr &MI, Register VReg);
- unsigned getNextUseDistance(const MachineBasicBlock &MBB, Register VReg);
+ // unsigned getNextUseDistance(const MachineInstr &MI, Register VReg);
+ unsigned getNextUseDistance(const MachineBasicBlock &MBB,
+ const Register VReg);
+ unsigned getNextUseDistance(const MachineBasicBlock::iterator I,
+ const Register VReg);
- bool isDead(MachineBasicBlock &MBB, Register R) {
+ bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ Register R) {
if (!R.isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
- return getNextUseDistance(MBB, R) == Infinity;
+ return I == MBB.end() ? getNextUseDistance(MBB, R) == Infinity
+ : getNextUseDistance(I, R) == Infinity;
}
- bool isDead(MachineInstr &MI, Register R) {
- if (!R.isVirtual())
- report_fatal_error("Only virtual registers allowed!\n", true);
- return getNextUseDistance(MI, R) == Infinity;
- }
-
- void getSortedForBlockEnd(MachineBasicBlock &MBB,
- SetVector<Register> &Regs) {
- auto SortByDist = [&](const Register LHS, const Register RHS) {
- return getNextUseDistance(MBB, LHS) < getNextUseDistance(MBB, RHS);
- };
- SmallVector<Register> Tmp(Regs.takeVector());
- sort(Tmp, SortByDist);
- Regs.insert(Tmp.begin(), Tmp.end());
- }
-
- void getSortedForInstruction(const MachineInstr &MI,
- SetVector<Register> &Regs) {
- // auto SortByDist = [&](const Register LHS, const Register RHS) {
- // unsigned LDist = getNextUseDistance(MI, LHS);
- // unsigned RDist = getNextUseDistance(MI, RHS);
- // if (LDist == RDist) {
- // const TargetRegisterClass *LRC = TRI->getRegClassForReg(*MRI, LHS);
- // unsigned LSize = TRI->getRegClassWeight(LRC).RegWeight;
- // const TargetRegisterClass *RRC = TRI->getRegClassForReg(*MRI, RHS);
- // unsigned RSize = TRI->getRegClassWeight(RRC).RegWeight;
- // return LSize < RSize;
- // }
- // return LDist < RDist;
- // };
- auto SortByDist = [&](const Register LHS, const Register RHS) {
- return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
- };
- SmallVector<Register> Tmp(Regs.takeVector());
- sort(Tmp, SortByDist);
- Regs.insert(Tmp.begin(), Tmp.end());
- }
-
- std::vector<std::pair<Register, unsigned>>
- getSortedByDistance(const MachineInstr &MI, std::vector<Register> &W) {
- std::vector<std::pair<Register, unsigned>> Result;
- auto compareByVal = [](std::pair<Register, unsigned> &LHS,
- std::pair<Register, unsigned> &RHS) -> bool {
- return LHS.second < RHS.second;
- };
-
- for (auto R : W) {
- dbgs() << printReg(R);
- Result.push_back(std::make_pair(R, getNextUseDistance(MI, R)));
- }
-
- std::sort(Result.begin(), Result.end(), compareByVal);
-
- return std::move(Result);
- }
+ // bool isDead(MachineBasicBlock &MBB, Register R) {
+ // if (!R.isVirtual())
+ // report_fatal_error("Only virtual registers allowed!\n", true);
+ // return getNextUseDistance(MBB, R) == Infinity;
+ // }
+
+ // bool isDead(MachineInstr &MI, Register R) {
+ // if (!R.isVirtual())
+ // report_fatal_error("Only virtual registers allowed!\n", true);
+ // return getNextUseDistance(MI, R) == Infinity;
+ // }
+
+// void getSortedForBlockEnd(MachineBasicBlock &MBB,
+// SetVector<Register> &Regs) {
+// auto SortByDist = [&](const Register LHS, const Register RHS) {
+// return getNextUseDistance(MBB, LHS) < getNextUseDistance(MBB, RHS);
+// };
+// SmallVector<Register> Tmp(Regs.takeVector());
+// sort(Tmp, SortByDist);
+// Regs.insert(Tmp.begin(), Tmp.end());
+// }
+
+// void getSortedForInstruction(const MachineInstr &MI,
+// SetVector<Register> &Regs) {
+// // auto SortByDist = [&](const Register LHS, const Register RHS) {
+// // unsigned LDist = getNextUseDistance(MI, LHS);
+// // unsigned RDist = getNextUseDistance(MI, RHS);
+// // if (LDist == RDist) {
+// // const TargetRegisterClass *LRC = TRI->getRegClassForReg(*MRI, LHS);
+// // unsigned LSize = TRI->getRegClassWeight(LRC).RegWeight;
+// // const TargetRegisterClass *RRC = TRI->getRegClassForReg(*MRI, RHS);
+// // unsigned RSize = TRI->getRegClassWeight(RRC).RegWeight;
+// // return LSize < RSize;
+// // }
+// // return LDist < RDist;
+// // };
+// auto SortByDist = [&](const Register LHS, const Register RHS) {
+// return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
+// };
+// SmallVector<Register> Tmp(Regs.takeVector());
+// sort(Tmp, SortByDist);
+// Regs.insert(Tmp.begin(), Tmp.end());
+// }
+
+// std::vector<std::pair<Register, unsigned>>
+// getSortedByDistance(const MachineInstr &MI, std::vector<Register> &W) {
+// std::vector<std::pair<Register, unsigned>> Result;
+// auto compareByVal = [](std::pair<Register, unsigned> &LHS,
+// std::pair<Register, unsigned> &RHS) -> bool {
+// return LHS.second < RHS.second;
+// };
+
+// for (auto R : W) {
+// dbgs() << printReg(R);
+// Result.push_back(std::make_pair(R, getNextUseDistance(MI, R)));
+// }
+
+// std::sort(Result.begin(), Result.end(), compareByVal);
+
+// return std::move(Result);
+// }
SetVector<Register> usedInBlock(MachineBasicBlock &MBB) {
return std::move(UsedInBlock[MBB.getNumber()]);
@@ -227,10 +246,10 @@ class AMDGPUNextUseAnalysisWrapper : public MachineFunctionPass {
bool runOnMachineFunction(MachineFunction &) override;
void releaseMemory() override { NU.clear(); }
- /// Implement the dump method.
- void print(raw_ostream &O, const Module * = nullptr) const override {
- NU.print(O);
- }
+ // /// Implement the dump method.
+ // void print(raw_ostream &O, const Module * = nullptr) const override {
+ // NU.print(O);
+ // }
NextUseResult &getNU() { return NU; }
};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index c35c1fe1f0683..3a3fafda7fb56 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -94,6 +94,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
// MF, AMDGPU::RegisterPressureSets::VGPR_32)
// : TRI->getRegPressureSetLimit(
// MF, AMDGPU::RegisterPressureSets::SReg_32);
+ RegisterMap.clear();
}
SpillInfo &getBlockInfo(const MachineBasicBlock &MBB);
@@ -107,13 +108,15 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void reloadAtEnd(MachineBasicBlock &MBB, Register VReg);
void spillAtEnd(MachineBasicBlock &MBB, Register VReg);
- void reloadBefore(Register VReg, MachineBasicBlock::iterator InsertBefore);
- void spillBefore(Register VReg, MachineBasicBlock::iterator InsertBefore);
+ void reloadBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore, Register VReg);
+ void spillBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore, Register VReg);
unsigned getLoopMaxRP(MachineLoop *L);
void limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
- MachineBasicBlock::iterator I,
- const RegisterSet Defs = RegisterSet());
+ MachineBasicBlock::iterator I, unsigned Limit,
+ RegisterSet &ToSpill);
unsigned getSizeInRegs(const Register VReg);
unsigned getSizeInRegs(const RegisterSet VRegs);
@@ -122,6 +125,23 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
(!IsVGPRsPass && TRI->isSGPRReg(*MRI, R)));
}
+ void sortRegSetAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ RegisterSet &VRegs) {
+ DenseMap<Register, unsigned> M;
+ bool BlockEnd = I == MBB.end();
+ for (auto R : VRegs)
+ M[R] = BlockEnd ? NU.getNextUseDistance(MBB, R)
+ : NU.getNextUseDistance(I, R);
+
+ auto SortByDist = [&](const Register LHS, const Register RHS) {
+ return M[LHS] < M[RHS];
+ };
+
+ SmallVector<Register> Tmp(VRegs.takeVector());
+ sort(Tmp, SortByDist);
+ VRegs.insert(Tmp.begin(), Tmp.end());
+ }
+
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity = 0);
@@ -221,23 +241,28 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
continue;
}
T4->stopTimer();
-
- limit(MBB, Active, Spilled, I);
- limit(MBB, Active, Spilled, I, Defs);
-
+
+ RegisterSet ToSpill;
+ limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
+ limit(MBB, Active, Spilled, std::next(I),
+ NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
T4->startTimer();
+ for (auto R : ToSpill) {
+ spillBefore(MBB, I, R);
+ Spilled.insert(R);
+ }
// FIXME: limit with Defs is assumed to create room for the registers being
// defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
Active.insert(Defs.begin(), Defs.end());
// Add reloads for VRegs in Reloads before I
for (auto R : Reloads)
- reloadBefore(R, I);
+ reloadBefore(MBB, I, R);
T4->stopTimer();
}
// Now, clear dead registers.
RegisterSet Deads;
for (auto R : Active) {
- if (NU.isDead(MBB, R))
+ if (NU.isDead(MBB, MBB.end(), R))
Deads.insert(R);
}
Active.set_subtract(Deads);
@@ -287,7 +312,13 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// We're about to insert N reloads at the end of the predecessor block.
// Make sure we have enough registers for N definitions or spill to make
// room for them.
- limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->end(), ReloadInPred);
+ RegisterSet ToSpill;
+ limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->end(),
+ NumAvailableRegs - getSizeInRegs(ReloadInPred), ToSpill);
+ for (auto R : ToSpill) {
+ spillBefore(*Pred, Pred->end(), R);
+ PE.SpillSet.insert(R);
+ }
for (auto R : ReloadInPred) {
reloadAtEnd(*Pred, R);
// FIXME: Do we need to update sets?
@@ -381,28 +412,30 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
}
void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, Register VReg) {
- reloadBefore(VReg, MBB.getFirstInstrTerminator());
+ reloadBefore(MBB, MBB.getFirstInstrTerminator(), VReg);
}
void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, Register VReg) {
- spillBefore(VReg, MBB.getFirstTerminator());
+ spillBefore(MBB, MBB.getFirstTerminator(), VReg);
}
-void AMDGPUSSASpiller::reloadBefore(Register VReg,
- MachineBasicBlock::iterator InsertBefore) {
+void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore,
+ Register VReg) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
TRI->getSpillAlign(*RC));
- TII->loadRegFromStackSlot(*InsertBefore->getParent(), InsertBefore, VReg, FI,
+ TII->loadRegFromStackSlot(MBB, InsertBefore, VReg, FI,
RC, TRI, VReg);
}
-void AMDGPUSSASpiller::spillBefore(Register VReg,
- MachineBasicBlock::iterator InsertBefore) {
+void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator InsertBefore,
+ Register VReg) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
TRI->getSpillAlign(*RC));
- TII->storeRegToStackSlot(*InsertBefore->getParent(), InsertBefore, VReg, true, FI,
+ TII->storeRegToStackSlot(MBB, InsertBefore, VReg, true, FI,
RC, TRI, VReg);
}
@@ -425,69 +458,28 @@ unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
RegisterSet &Spilled,
- MachineBasicBlock::iterator I,
- const RegisterSet Defs) {
- MachineBasicBlock::iterator LimitPoint = I;
+ MachineBasicBlock::iterator I, unsigned Limit,
+ RegisterSet &ToSpill) {
- if (!Defs.empty()) {
- if (LimitPoint != MBB.end())
- LimitPoint = std::next(LimitPoint);
- }
-
- Active.remove_if([&](Register R) {
- return (LimitPoint == MBB.end()) ? NU.isDead(MBB, R)
- : NU.isDead(*LimitPoint, R);
- });
+ T2->startTimer();
+ Active.remove_if([&](Register R) { return NU.isDead(MBB, I, R); });
unsigned CurRP = getSizeInRegs(Active);
- unsigned DefsRP = getSizeInRegs(Defs);
- if (CurRP <= NumAvailableRegs - DefsRP)
+ if (CurRP <= Limit) {
+ T2->stopTimer();
return;
-
- //T2->startTimer();
- DenseMap<Register, unsigned> M;
- for (auto R : Active) {
- unsigned D = (LimitPoint == MBB.end())
- ? NU.getNextUseDistance(MBB, R)
- : NU.getNextUseDistance(*LimitPoint, R);
- M[R] = D;
}
- auto SortByDist = [&](const Register LHS, const Register RHS) {
- return M[LHS] < M[RHS];
- };
- SmallVector<Register> Tmp(Active.takeVector());
- sort(Tmp, SortByDist);
- Active.insert(Tmp.begin(), Tmp.end());
- //T2->stopTimer();
-
-
- unsigned ShrinkTo = NumAvailableRegs - DefsRP;
- RegisterSet ToSpill;
-
- // unsigned SizeToSpill = CurRP - ShrinkTo;
- // for (auto R : reverse(Active)) {
- // if (SizeToSpill == 0)
- // break;
- // unsigned RegSize = getSizeInRegs(R);
- // if (RegSize <= SizeToSpill) {
- // ToSpill.insert(R);
- // SizeToSpill -= RegSize;
- // }
- // }
- // Active.set_subtract(ToSpill);
-
- while (CurRP > ShrinkTo) {
+
+ sortRegSetAt(MBB, I, Active);
+
+ while (CurRP > Limit) {
auto R = Active.pop_back_val();
unsigned RegSize = getSizeInRegs(R);
CurRP -= RegSize;
if (!Spilled.contains(R))
ToSpill.insert(R);
}
-
- for (auto R : ToSpill) {
- spillBefore(R, I);
- Spilled.insert(R);
- }
+ T2->stopTimer();
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const Register VReg) {
@@ -508,7 +500,7 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Limit = Capacity ? Capacity : NumAvailableRegs;
auto &Active = RegisterMap[MBB.getNumber()].ActiveSet;
unsigned Size = getSizeInRegs(Active);
- NU.getSortedForInstruction(*MBB.instr_begin(), S);
+ sortRegSetAt(MBB, MBB.begin(), S);
for (auto VReg : S) {
unsigned RSize = getSizeInRegs(VReg);
if (Size + RSize < Limit) {
@@ -525,7 +517,6 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
init(MF, true);
processFunction(MF);
- dbgs() << "SSA Spiller end\n";
TG->print(llvm::errs());
return false;
}
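
A minimal standalone sketch (plain C++, not the patch code) of the eviction step the reworked limit() is meant to perform may help the review: values are ordered by next-use distance and the furthest ones are marked for spilling until register pressure fits under the limit, while the caller materializes the actual spill instructions. All names below are illustrative only.

#include <algorithm>
#include <vector>

struct Value {
  unsigned Id;       // stand-in for a virtual register
  unsigned NextUse;  // distance to the next use; a huge value means "dead"
  unsigned Size;     // size in 32-bit registers
};

// Returns the ids picked for spilling; Active keeps the nearest-use values.
std::vector<unsigned> limitModel(std::vector<Value> &Active, unsigned Limit) {
  unsigned CurRP = 0;
  for (const Value &V : Active)
    CurRP += V.Size;

  // Nearest next use first, so the furthest candidates end up at the back.
  std::sort(Active.begin(), Active.end(),
            [](const Value &L, const Value &R) { return L.NextUse < R.NextUse; });

  std::vector<unsigned> ToSpill;
  while (CurRP > Limit && !Active.empty()) {
    Value V = Active.back();
    Active.pop_back();
    CurRP -= V.Size;
    ToSpill.push_back(V.Id);
  }
  return ToSpill;
}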
>From 2f9d277e95a38d5b6f740eb99078b87c52a9e50d Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Tue, 24 Dec 2024 08:52:11 -0600
Subject: [PATCH 08/46] SSA Spiller. Added a VRM modeled on the one used in
 regalloc, but for virtual regs only
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 70 +++++++++++++++------
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 3a3fafda7fb56..bc703be0eebb5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -40,6 +40,35 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
const GCNSubtarget *ST;
MachineFrameInfo *MFI;
+ static constexpr int NO_STACK_SLOT = INT_MAX;
+
+ unsigned NumSpillSlots;
+
+ IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+
+ unsigned createSpillSlot(const TargetRegisterClass *RC) {
+ unsigned Size = TRI->getSpillSize(*RC);
+ Align Alignment = TRI->getSpillAlign(*RC);
+ // TODO: See VirtRegMap::createSpillSlot - if we need to bother with
+ // TRI->canRealignStack(*MF) ?
+ int SS = MFI->CreateSpillStackObject(Size, Alignment);
+ ++NumSpillSlots;
+ return SS;
+ }
+
+ int assignVirt2StackSlot(Register virtReg) {
+ assert(virtReg.isVirtual());
+ assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
+ "attempt to assign stack slot to already spilled register");
+ const TargetRegisterClass *RC = MRI->getRegClass(virtReg);
+ return Virt2StackSlotMap[virtReg] = createSpillSlot(RC);
+ }
+
+ int getStackSlot(Register virtReg) const {
+ assert(virtReg.isVirtual());
+ return Virt2StackSlotMap[virtReg.id()];
+ }
+
TimerGroup *TG;
Timer *T1;
Timer *T2;
@@ -77,11 +106,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void init(MachineFunction &MF, bool IsVGPRs) {
IsVGPRsPass = IsVGPRs;
- ST = &MF.getSubtarget<GCNSubtarget>();
- MRI = &MF.getRegInfo();
- MFI = &MF.getFrameInfo();
- TRI = ST->getRegisterInfo();
- TII = ST->getInstrInfo();
+
TG = new TimerGroup("SSA SPiller Timing", "Time Spent in different parts of the SSA Spiller");
T1 = new Timer("General time", "ProcessFunction", *TG);
T2 = new Timer("Limit", "Time spent in limit()", *TG);
@@ -150,13 +175,14 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
AMDGPUSSASpiller(const LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
- : LIS(LIS), LI(LI), MDT(MDT), NU(NU) {}
- ~AMDGPUSSASpiller() {
- delete TG;
- delete T2;
- delete T3;
- delete T4;
- //delete TG;
+ : LIS(LIS), LI(LI), MDT(MDT), NU(NU),
+ NumSpillSlots(0), Virt2StackSlotMap(NO_STACK_SLOT) {}
+ ~AMDGPUSSASpiller() {
+ delete TG;
+ delete T2;
+ delete T3;
+ delete T4;
+ // delete TG;
}
bool run(MachineFunction &MF);
};
@@ -423,8 +449,7 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
Register VReg) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
- TRI->getSpillAlign(*RC));
+ int FI = getStackSlot(VReg);
TII->loadRegFromStackSlot(MBB, InsertBefore, VReg, FI,
RC, TRI, VReg);
}
@@ -433,10 +458,9 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
Register VReg) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- int FI = MFI->CreateSpillStackObject(TRI->getRegSizeInBits(*RC),
- TRI->getSpillAlign(*RC));
- TII->storeRegToStackSlot(MBB, InsertBefore, VReg, true, FI,
- RC, TRI, VReg);
+ int FI = assignVirt2StackSlot(VReg);
+ TII->storeRegToStackSlot(MBB, InsertBefore, VReg, true,
+ FI, RC, TRI, VReg);
}
unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
@@ -512,10 +536,18 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
}
bool AMDGPUSSASpiller::run(MachineFunction &MF) {
+ ST = &MF.getSubtarget<GCNSubtarget>();
+ MRI = &MF.getRegInfo();
+ MFI = &MF.getFrameInfo();
+ TRI = ST->getRegisterInfo();
+ TII = ST->getInstrInfo();
+
+ Virt2StackSlotMap.resize(MRI->getNumVirtRegs());
+
init(MF, false);
processFunction(MF);
init(MF, true);
-
+
processFunction(MF);
TG->print(llvm::errs());
return false;
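
As a discussion aid, here is a small standalone model (plain C++, not the patch itself) of the per-virtual-register spill-slot bookkeeping added above: a slot is created the first time a register is spilled and every later reload reuses it. The class and method names are invented for illustration.

#include <cassert>
#include <climits>
#include <unordered_map>

class SpillSlotMap {
  static constexpr int NoSlot = INT_MAX; // mirrors the NO_STACK_SLOT sentinel
  int NextSlot = 0;
  std::unordered_map<unsigned, int> Slots; // vreg id -> frame index

public:
  // Called when a value is spilled for the first time (cf. spillBefore()).
  int assign(unsigned VReg) {
    assert(!Slots.count(VReg) && "register already has a stack slot");
    return Slots[VReg] = NextSlot++;
  }
  // Called when a previously spilled value is reloaded (cf. reloadBefore()).
  int lookup(unsigned VReg) const {
    auto It = Slots.find(VReg);
    return It == Slots.end() ? NoSlot : It->second;
  }
};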
>From c28591b06dc33269a2d4b3fa6b16d9fc6f7b31b2 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 26 Dec 2024 12:17:47 -0600
Subject: [PATCH 09/46] Next Use Analysis and SSA Spiller must account for
 subregisters. WIP 27.12.2024
---
llvm/include/llvm/CodeGen/TargetInstrInfo.h | 30 ++-
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 175 ++--------------
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 197 +++++++++---------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 166 +++++++++------
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 13 +-
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 35 ++--
llvm/lib/Target/X86/X86InstrInfo.h | 23 +-
7 files changed, 271 insertions(+), 368 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index b5b83c7ff1164..f39b327ebc018 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1154,14 +1154,13 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo {
/// register, \p VReg is the register being assigned. This additional register
/// argument is needed for certain targets when invoked from RegAllocFast to
/// map the spilled physical register to its virtual register. A null register
- /// can be passed elsewhere. The \p Flags is used to set appropriate machine
- /// flags on the spill instruction e.g. FrameSetup flag on a callee saved
- /// register spill instruction, part of prologue, during the frame lowering.
- virtual void storeRegToStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
+ /// can be passed elsewhere.
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ Register VReg, unsigned SubRegIdx = 0) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::storeRegToStackSlot!");
}
@@ -1173,14 +1172,13 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo {
/// register, \p VReg is the register being assigned. This additional register
/// argument is needed for certain targets when invoked from RegAllocFast to
/// map the loaded physical register to its virtual register. A null register
- /// can be passed elsewhere. The \p Flags is used to set appropriate machine
- /// flags on the spill instruction e.g. FrameDestroy flag on a callee saved
- /// register reload instruction, part of epilogue, during the frame lowering.
- virtual void loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const {
+ /// can be passed elsewhere.
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ Register DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ Register VReg, unsigned SubRegIdx = 0) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::loadRegFromStackSlot!");
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 1ceebc132fc6f..60e6d112c9dda 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -10,9 +10,10 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SlotIndexes.h"
-#include "llvm/Passes/PassPlugin.h"
#include "llvm/InitializePasses.h"
+#include "llvm/MC/LaneBitmask.h"
#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/Timer.h"
#include "AMDGPU.h"
@@ -89,12 +90,24 @@ void NextUseResult::analyze(const MachineFunction &MF) {
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.getReg().isVirtual()) {
- Register VReg = MO.getReg();
+ VRegMaskPair P(MO, *TRI);
if(MO.isUse()) {
- Curr[VReg] = 0;
- UsedInBlock[MBB->getNumber()].insert(VReg);
+ Curr[P] = 0;
+ UsedInBlock[MBB->getNumber()].insert(P);
} else if (MO.isDef()) {
- Curr.erase(VReg);
+
+ SmallVector<VRegMaskPair> ToKill;
+ for (auto X : Curr) {
+ if (X.first.VReg == P.VReg) {
+ X.first.LaneMask &= ~P.LaneMask;
+ if (X.first.LaneMask.none())
+ ToKill.push_back(X.first);
+ }
+ }
+
+ for (auto D : ToKill) {
+ Curr.erase(D);
+ }
}
}
}
@@ -114,168 +127,26 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
- const Register VReg) {
+ const VRegMaskPair VMP) {
unsigned Dist = Infinity;
const MachineBasicBlock *MBB = I->getParent();
unsigned MBBNum = MBB->getNumber();
if (NextUseMap.contains(MBBNum) &&
NextUseMap[MBBNum].InstrDist.contains(&*I) &&
- NextUseMap[MBBNum].InstrDist[&*I].contains(VReg))
- Dist = NextUseMap[MBBNum].InstrDist[&*I][VReg];
+ NextUseMap[MBBNum].InstrDist[&*I].contains(VMP))
+ Dist = NextUseMap[MBBNum].InstrDist[&*I][VMP];
return Dist;
}
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
- const Register VReg) {
+ const VRegMaskPair VMP) {
unsigned Dist = Infinity;
unsigned MBBNum = MBB.getNumber();
if (NextUseMap.contains(MBBNum))
- Dist = NextUseMap[MBBNum].Bottom[VReg];
+ Dist = NextUseMap[MBBNum].Bottom[VMP];
return Dist;
}
-// unsigned NextUseResult::getNextUseDistance(const MachineInstr &MI,
-// const Register VReg) {
-// SlotIndex Idx = Indexes->getInstructionIndex(MI);
-// assert(Idx.isValid() && "Invalid Instruction index!");
-// if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
-// return InstrCache[&Idx][VReg];
-// }
-// return computeNextUseDistance(*MI.getParent(), Idx, VReg);
-// }
-
-// unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
-// Register VReg) {
-// SlotIndex Idx = Indexes->getMBBEndIdx(&MBB);
-// assert(Idx.isValid() && "Invalid Instruction index!");
-// if (InstrCache.contains(&Idx) && InstrCache[&Idx].contains(VReg)) {
-// return InstrCache[&Idx][VReg];
-// }
-// return computeNextUseDistance(MBB, Idx, VReg);
-// }
-
-// unsigned NextUseResult::computeNextUseDistance(const MachineBasicBlock &MBB,
-// const SlotIndex I,
-// Register VReg) {
-// //T2->startTimer();
-
-// unsigned Dist = Infinity;
-
-// SlotIndex Begin = Indexes->getMBBStartIdx(MBB.getNumber());
-
-// int IDist = Begin.distance(I)/SlotIndex::InstrDist;
-// if (auto VMapRef = getVRegMap(&MBB)) {
-// VRegDistances &VRegs = VMapRef.value();
-// if (VRegs.contains(VReg)) {
-// int UseDist = VRegs[VReg];
-// if ((UseDist - IDist) < 0) {
-
-// // FIXME: VRegs contains only upward exposed info! In other words - the
-// // very first use in block!
-// // (UseDist - IDist) < 0 just means that our MI is later then the 1st
-// // use of the VReg.
-// // Function user (calls from outside: from SSASpiller) is interested in
-// // the next use in block after the MI!
-// // We need to scan for the uses in current block - from MI to the block
-// // end BEFORE checking the Succs!
-
-// // NOTE: Make sure that we don't spoil the info for Next Use analysis
-// // itself. If so, we need 2 different functions for querying
-// // nextUseDistance!
-// bool Done = false;
-// MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
-// if (Instr) {
-// // we canot use SlotIndexes to compare positions because
-// // spills/reloads were not added in Instruction Index. So, just scan
-// // the BB.
-// unsigned D = 0;
-// MachineBasicBlock::iterator It(Instr);
-// while (It != MBB.end()) {
-// if (It->definesRegister(VReg, TRI)) {
-// // VReg is DEAD
-// Dist = Infinity;
-// Done = true;
-// break;
-// }
-// if (It->readsRegister(VReg, TRI)) {
-// Dist = D;
-// Done = true;
-// break;
-// }
-// D++;
-// It++;
-// }
-// }
-// if (!Done)
-// // The instruction of interest is after the first use of the register
-// // in the block and the register has not been killed in block. Look
-// // for the next use in successors.
-// for (auto Succ : successors(&MBB)) {
-// if (auto SuccVMapRef = getVRegMap(Succ)) {
-// VRegDistances &SuccVRegs = SuccVMapRef.value();
-// if (SuccVRegs.contains(VReg)) {
-// Dist = std::min(Dist, SuccVRegs[VReg]);
-// }
-// }
-// }
-// } else {
-// Dist = UseDist - IDist;
-// }
-// } else {
-// // We hit a case when the VReg is defined and used inside the block.
-// // Let's see if I is in between. Since we may be called from the broken
-// // SSA function we cannot rely on MRI.getVRegDef. The VReg Def in block
-// // may be reload, so we canot use SlotIndexes to compare positions because
-// // spills/reloads were not added in Instruction Index. So, just scan the
-// // BB.
-// MachineInstr *Instr = Indexes->getInstructionFromIndex(I);
-// if (Instr) {
-// bool DefSeen = false, InstrSeen = false;
-// unsigned D = 0;
-// for (auto &MI : MBB) {
-// if (InstrSeen)
-// D++;
-// if (Instr == &MI) {
-// if (!DefSeen)
-// break;
-// InstrSeen = true;
-// }
-
-// if (MI.definesRegister(VReg, TRI))
-// DefSeen = true;
-// if (MI.readsRegister(VReg, TRI) && InstrSeen) {
-// Dist = D;
-// break;
-// }
-// }
-// }
-
-// // MachineInstr *Def = MRI->getVRegDef(VReg);
-// // assert(Def && "Neither use distance no Def found for reg!");
-// // SlotIndex DefIdx = Indexes->getInstructionIndex(*Def);
-// // assert(DefIdx.isValid() && "Register Def not in the Index");
-// // if (SlotIndex::isEarlierInstr(DefIdx, I)) {
-// // // "I" is after the Def
-// // for (auto &U : MRI->use_instructions(VReg)) {
-// // assert(U.getParent() == &MBB &&
-// // "Use out of the block fount but distance was not recorded");
-// // SlotIndex UIdx = Indexes->getInstructionIndex(U);
-// // if (SlotIndex::isEarlierInstr(I, UIdx)) {
-// // unsigned UDist = I.distance(UIdx)/SlotIndex::InstrDist;
-// // if (UDist < Dist)
-// // Dist = UDist;
-// // }
-// // }
-// // }
-// }
-// if (Dist != Infinity)
-// InstrCache[&I][VReg] = Dist;
-// }
-// //T2->stopTimer();
-// //TG->print(llvm::errs());
-// return Dist;
-// }
-
AMDGPUNextUseAnalysis::Result
AMDGPUNextUseAnalysis::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 6b81c489e2fe0..d7a9ab81821e8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -23,6 +23,55 @@ using namespace llvm;
// namespace {
+struct VRegMaskPair {
+public:
+ Register VReg;
+ LaneBitmask LaneMask;
+
+ VRegMaskPair(Register VReg, LaneBitmask LaneMask)
+ : VReg(VReg), LaneMask(LaneMask) {}
+
+ VRegMaskPair(const MachineOperand MO, const TargetRegisterInfo &TRI) {
+ assert(MO.isReg() && "Not a register operand!");
+ Register R = MO.getReg();
+ assert(R.isVirtual() && "Not a virtual register!");
+ VReg = R;
+ LaneMask = LaneBitmask::getAll();
+ unsigned subRegIndex = MO.getSubReg();
+ if (subRegIndex) {
+ LaneMask = TRI.getSubRegIndexLaneMask(subRegIndex);
+ }
+ }
+
+ bool operator==(const VRegMaskPair &other) const {
+ return VReg == other.VReg && LaneMask == other.LaneMask;
+ }
+};
+
+namespace llvm {
+template <> struct DenseMapInfo<VRegMaskPair> {
+ static inline VRegMaskPair getEmptyKey() {
+ return {Register(DenseMapInfo<unsigned>::getEmptyKey()),
+ LaneBitmask(0xFFFFFFFFFFFFFFFFULL)};
+ }
+
+ static inline VRegMaskPair getTombstoneKey() {
+ return {Register(DenseMapInfo<unsigned>::getTombstoneKey()),
+ LaneBitmask(0xFFFFFFFFFFFFFFFEULL)};
+ }
+
+ static unsigned getHashValue(const VRegMaskPair &P) {
+ return DenseMapInfo<unsigned>::getHashValue(P.VReg.id()) ^
+ DenseMapInfo<uint64_t>::getHashValue(P.LaneMask.getAsInteger());
+ }
+
+ static bool isEqual(const VRegMaskPair &LHS, const VRegMaskPair &RHS) {
+ return DenseMapInfo<unsigned>::isEqual(LHS.VReg.id(), RHS.VReg.id()) &&
+ DenseMapInfo<uint64_t>::isEqual(LHS.LaneMask.getAsInteger(),
+ RHS.LaneMask.getAsInteger());
+ }
+};
+} // namespace llvm
class NextUseResult {
friend class AMDGPUNextUseAnalysisWrapper;
SlotIndexes *Indexes;
@@ -34,23 +83,33 @@ class NextUseResult {
Timer *T1;
Timer *T2;
- using VRegDistances = DenseMap<Register, unsigned>;
+ using VRegDistances = DenseMap<VRegMaskPair, unsigned>;
class NextUseInfo {
// FIXME: need to elaborate proper class interface!
public:
VRegDistances Bottom;
DenseMap<const MachineInstr *, VRegDistances> InstrDist;
};
-
-
+
+ // VRegMaskPair getFromOperand(const MachineOperand &MO) {
+ // assert(MO.isReg() && "Not a register operand!");
+ // Register R = MO.getReg();
+ // assert(R.isVirtual() && "Not a virtual register!");
+ // LaneBitmask Mask = LaneBitmask::getAll();
+ // unsigned subRegIndex = MO.getSubReg();
+ // if (subRegIndex) {
+ // Mask = TRI->getSubRegIndexLaneMask(subRegIndex);
+ // }
+ // return {R, Mask};
+ // }
+
DenseMap<unsigned, NextUseInfo> NextUseMap;
public:
private:
- //DenseMap<unsigned, VRegDistances> NextUseMap;
- DenseMap<unsigned, SetVector<Register>> UsedInBlock;
+ DenseMap<unsigned, SetVector<VRegMaskPair>> UsedInBlock;
DenseMap<int, int> EdgeWeigths;
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
void init(const MachineFunction &MF);
@@ -72,14 +131,35 @@ class NextUseResult {
raw_ostream &O = dbgs()) const {
O << "\n";
for (auto P : D) {
- O << "Vreg: " << printReg(P.first) << "[ " << P.second << "]\n";
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForReg(*MRI, P.first.VReg);
+ bool HasSubReg =
+ TRI->getCoveringSubRegIndexes(*MRI, RC, P.first.LaneMask, Idxs);
+ O << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ O << printReg(P.first.VReg, TRI, i, MRI) << "[ " << P.second << "]\n";
+ else
+ O << printReg(P.first.VReg) << "[ " << P.second << "]\n";
}
}
void printVregDistancesD(const VRegDistances &D) const {
dbgs() << "\n";
for (auto P : D) {
- dbgs() << "Vreg: " << printReg(P.first) << "[ " << P.second << "]\n";
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForReg(*MRI, P.first.VReg);
+ bool HasSubReg =
+ TRI->getCoveringSubRegIndexes(*MRI, RC, P.first.LaneMask, Idxs);
+ dbgs() << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ dbgs() << printReg(P.first.VReg, TRI, i, MRI) << "[ " << P.second
+ << "]\n";
+ else
+ dbgs() << printReg(P.first.VReg) << "[ " << P.second << "]\n";
}
}
@@ -90,20 +170,12 @@ class NextUseResult {
// }
// }
- // std::optional<std::reference_wrapper<VRegDistances>>
- // getVRegMap(const MachineBasicBlock *MBB) {
- // if (NextUseMap.contains(MBB->getNumber())) {
- // return NextUseMap[MBB->getNumber()];
- // }
- // return std::nullopt;
- // }
-
VRegDistances &mergeDistances(VRegDistances &LHS, const VRegDistances &RHS,
unsigned Weight = 0) {
for (auto Pair : LHS) {
- Register VReg = Pair.getFirst();
- if (RHS.contains(VReg)) {
- LHS[VReg] = std::min(Pair.getSecond(), RHS.lookup(VReg) + Weight);
+ VRegMaskPair VRegMP = Pair.getFirst();
+ if (RHS.contains(VRegMP)) {
+ LHS[VRegMP] = std::min(Pair.getSecond(), RHS.lookup(VRegMP) + Weight);
}
}
for (auto Pair : RHS) {
@@ -114,18 +186,6 @@ class NextUseResult {
return LHS;
}
- // void setNextUseDistance(const MachineBasicBlock *MBB, Register VReg,
- // int Distance) {
- // auto VMapRef = getVRegMap(MBB);
- // if (!VMapRef)
- // VMapRef = NextUseMap[MBB->getNumber()];
- // VRegDistances &VRegs = VMapRef.value();
- // VRegs[VReg] = Distance;
- // }
-
- // unsigned computeNextUseDistance(const MachineBasicBlock &MBB,
- // const SlotIndex I, Register Vreg);
-
void clear() {
NextUseMap.clear();
EdgeWeigths.clear();
@@ -142,83 +202,20 @@ class NextUseResult {
// void print(raw_ostream &O) const { dump(O); }
- // unsigned getNextUseDistance(const MachineInstr &MI, Register VReg);
unsigned getNextUseDistance(const MachineBasicBlock &MBB,
- const Register VReg);
+ const VRegMaskPair VMP);
unsigned getNextUseDistance(const MachineBasicBlock::iterator I,
- const Register VReg);
+ const VRegMaskPair VMP);
bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- Register R) {
- if (!R.isVirtual())
+ const VRegMaskPair VMP) {
+ if (!VMP.VReg.isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
- return I == MBB.end() ? getNextUseDistance(MBB, R) == Infinity
- : getNextUseDistance(I, R) == Infinity;
+ return I == MBB.end() ? getNextUseDistance(MBB, VMP) == Infinity
+ : getNextUseDistance(I, VMP) == Infinity;
}
- // bool isDead(MachineBasicBlock &MBB, Register R) {
- // if (!R.isVirtual())
- // report_fatal_error("Only virtual registers allowed!\n", true);
- // return getNextUseDistance(MBB, R) == Infinity;
- // }
-
- // bool isDead(MachineInstr &MI, Register R) {
- // if (!R.isVirtual())
- // report_fatal_error("Only virtual registers allowed!\n", true);
- // return getNextUseDistance(MI, R) == Infinity;
- // }
-
-// void getSortedForBlockEnd(MachineBasicBlock &MBB,
-// SetVector<Register> &Regs) {
-// auto SortByDist = [&](const Register LHS, const Register RHS) {
-// return getNextUseDistance(MBB, LHS) < getNextUseDistance(MBB, RHS);
-// };
-// SmallVector<Register> Tmp(Regs.takeVector());
-// sort(Tmp, SortByDist);
-// Regs.insert(Tmp.begin(), Tmp.end());
-// }
-
-// void getSortedForInstruction(const MachineInstr &MI,
-// SetVector<Register> &Regs) {
-// // auto SortByDist = [&](const Register LHS, const Register RHS) {
-// // unsigned LDist = getNextUseDistance(MI, LHS);
-// // unsigned RDist = getNextUseDistance(MI, RHS);
-// // if (LDist == RDist) {
-// // const TargetRegisterClass *LRC = TRI->getRegClassForReg(*MRI, LHS);
-// // unsigned LSize = TRI->getRegClassWeight(LRC).RegWeight;
-// // const TargetRegisterClass *RRC = TRI->getRegClassForReg(*MRI, RHS);
-// // unsigned RSize = TRI->getRegClassWeight(RRC).RegWeight;
-// // return LSize < RSize;
-// // }
-// // return LDist < RDist;
-// // };
-// auto SortByDist = [&](const Register LHS, const Register RHS) {
-// return getNextUseDistance(MI, LHS) < getNextUseDistance(MI, RHS);
-// };
-// SmallVector<Register> Tmp(Regs.takeVector());
-// sort(Tmp, SortByDist);
-// Regs.insert(Tmp.begin(), Tmp.end());
-// }
-
-// std::vector<std::pair<Register, unsigned>>
-// getSortedByDistance(const MachineInstr &MI, std::vector<Register> &W) {
-// std::vector<std::pair<Register, unsigned>> Result;
-// auto compareByVal = [](std::pair<Register, unsigned> &LHS,
-// std::pair<Register, unsigned> &RHS) -> bool {
-// return LHS.second < RHS.second;
-// };
-
-// for (auto R : W) {
-// dbgs() << printReg(R);
-// Result.push_back(std::make_pair(R, getNextUseDistance(MI, R)));
-// }
-
-// std::sort(Result.begin(), Result.end(), compareByVal);
-
-// return std::move(Result);
-// }
-
- SetVector<Register> usedInBlock(MachineBasicBlock &MBB) {
+ SetVector<VRegMaskPair> usedInBlock(MachineBasicBlock &MBB) {
return std::move(UsedInBlock[MBB.getNumber()]);
}
};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index bc703be0eebb5..61b6bb6a40070 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -44,7 +44,13 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned NumSpillSlots;
- IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+ DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
+
+ // TODO: HOW TO MAP VREG + LANEMASK TO SPILL SLOT ???
+
+ // IF IT EVEN POSSIBLE TO SPILL REG.SUBREG ?
+
+ // CREATE NEW PSEUDOS SI_SPILL_XXX_SAVE/RESTORE_WITH_SUBREG ???
unsigned createSpillSlot(const TargetRegisterClass *RC) {
unsigned Size = TRI->getSpillSize(*RC);
@@ -56,17 +62,17 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
return SS;
}
- int assignVirt2StackSlot(Register virtReg) {
- assert(virtReg.isVirtual());
- assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
+ unsigned assignVirt2StackSlot(VRegMaskPair VMP) {
+ assert(VMP.VReg.isVirtual());
+ assert(!Virt2StackSlotMap.contains(VMP) &&
"attempt to assign stack slot to already spilled register");
- const TargetRegisterClass *RC = MRI->getRegClass(virtReg);
- return Virt2StackSlotMap[virtReg] = createSpillSlot(RC);
+ const TargetRegisterClass *RC = MRI->getRegClass(VMP.VReg);
+ return Virt2StackSlotMap[VMP] = createSpillSlot(RC);
}
- int getStackSlot(Register virtReg) const {
- assert(virtReg.isVirtual());
- return Virt2StackSlotMap[virtReg.id()];
+ unsigned getStackSlot(VRegMaskPair VMP) {
+ assert(VMP.VReg.isVirtual());
+ return Virt2StackSlotMap[VMP];
}
TimerGroup *TG;
@@ -75,7 +81,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
Timer *T3;
Timer *T4;
- using RegisterSet = SetVector<Register>;
+ using RegisterSet = SetVector<VRegMaskPair>;
struct SpillInfo {
//MachineBasicBlock *Parent;
@@ -89,16 +95,28 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<unsigned, unsigned> PostponedLoopLatches;
DenseMap<unsigned, SmallVector<unsigned>> LoopHeader2Latches;
+ void printVRegMaskPair(const VRegMaskPair P) {
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
+ bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs);
+ dbgs() << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ dbgs() << printReg(P.VReg, TRI, i, MRI) << "]\n";
+ else
+ dbgs() << printReg(P.VReg) << "]\n";
+ }
+
void dump() {
for (auto SI : RegisterMap) {
dbgs() << "\nMBB: " << SI.first;
dbgs() << "\n\tW: ";
- for (auto R : SI.second.ActiveSet) {
- dbgs() << printReg(R) << " ";
+ for (auto P : SI.second.ActiveSet) {
+ printVRegMaskPair(P);
}
dbgs() << "\n\tS: ";
- for (auto R : SI.second.SpillSet) {
- dbgs() << printReg(R) << " ";
+ for (auto P : SI.second.SpillSet) {
+ printVRegMaskPair(P);
}
dbgs() << "\n";
}
@@ -131,20 +149,24 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void initActiveSetUsualBlock(MachineBasicBlock &MBB);
void initActiveSetLoopHeader(MachineBasicBlock &MBB);
- void reloadAtEnd(MachineBasicBlock &MBB, Register VReg);
- void spillAtEnd(MachineBasicBlock &MBB, Register VReg);
+ void reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
+ void spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
void reloadBefore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, Register VReg);
+ MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
void spillBefore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, Register VReg);
+ MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
unsigned getLoopMaxRP(MachineLoop *L);
void limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
MachineBasicBlock::iterator I, unsigned Limit,
RegisterSet &ToSpill);
- unsigned getSizeInRegs(const Register VReg);
+ unsigned getSizeInRegs(const VRegMaskPair VMP);
unsigned getSizeInRegs(const RegisterSet VRegs);
+
+ const TargetRegisterClass *getRegClassForVregMaskPair(VRegMaskPair VMP,
+ unsigned &SubRegIdx);
+
bool takeReg(Register R) {
return ((IsVGPRsPass && TRI->isVGPR(*MRI, R)) ||
(!IsVGPRsPass && TRI->isSGPRReg(*MRI, R)));
@@ -152,17 +174,17 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void sortRegSetAt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
RegisterSet &VRegs) {
- DenseMap<Register, unsigned> M;
+ DenseMap<VRegMaskPair, unsigned> M;
bool BlockEnd = I == MBB.end();
- for (auto R : VRegs)
- M[R] = BlockEnd ? NU.getNextUseDistance(MBB, R)
- : NU.getNextUseDistance(I, R);
+ for (auto VMP : VRegs)
+ M[VMP] = BlockEnd ? NU.getNextUseDistance(MBB, VMP)
+ : NU.getNextUseDistance(I, VMP);
- auto SortByDist = [&](const Register LHS, const Register RHS) {
+ auto SortByDist = [&](const VRegMaskPair LHS, const VRegMaskPair RHS) {
return M[LHS] < M[RHS];
};
- SmallVector<Register> Tmp(VRegs.takeVector());
+ SmallVector<VRegMaskPair> Tmp(VRegs.takeVector());
sort(Tmp, SortByDist);
VRegs.insert(Tmp.begin(), Tmp.end());
}
@@ -242,24 +264,24 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Register VReg = U.getReg();
if (!takeReg(VReg))
continue;
- // if (U.getSubReg()) {
- // dbgs() << U << "\n";
- // }
- if (Active.insert(VReg)) {
+
+ VRegMaskPair VMP(U, *TRI);
+
+ if (Active.insert(VMP)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
// SPECIAL CASE: undef
if (!U.isUndef()) {
- Spilled.insert(VReg);
- Reloads.insert(VReg);
+ Spilled.insert(VMP);
+ Reloads.insert(VMP);
}
}
}
RegisterSet Defs;
for (auto D : I->defs()) {
if (D.getReg().isVirtual() && takeReg(D.getReg()))
- Defs.insert(D.getReg());
+ Defs.insert(VRegMaskPair(D, *TRI));
}
if (Reloads.empty() && Defs.empty()) {
@@ -397,7 +419,7 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
if (!LIS.hasInterval(VReg))
continue;
if (takeReg(VReg) && LIS.isLiveInToMBB(LIS.getInterval(VReg), &MBB)) {
- LiveIn.insert(VReg);
+ LiveIn.insert({VReg, LaneBitmask::getAll()});
}
}
@@ -405,7 +427,7 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
for (auto U : PHI.uses()) {
if (U.isReg() && takeReg(U.getReg())) {
// assume PHIs operands are always virtual regs
- LiveIn.insert(U.getReg());
+ LiveIn.insert(VRegMaskPair(U, *TRI));
}
}
}
@@ -437,30 +459,53 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
}
}
-void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, Register VReg) {
- reloadBefore(MBB, MBB.getFirstInstrTerminator(), VReg);
+const TargetRegisterClass *
+AMDGPUSSASpiller::getRegClassForVregMaskPair(VRegMaskPair VMP,
+ unsigned &SubRegIdx) {
+ const TargetRegisterClass *RC;
+
+ if (VMP.LaneMask.all()) {
+ RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
+ } else {
+ SmallVector<unsigned> Idxs;
+ if (TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs)) {
+ SubRegIdx = Idxs[0];
+ for (int i = 1; i < Idxs.size() - 1; i++)
+ SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
+ RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ }
+ }
+
+ return RC;
+}
+
+void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
+ reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
}
-void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, Register VReg) {
- spillBefore(MBB, MBB.getFirstTerminator(), VReg);
+void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
+ spillBefore(MBB, MBB.getFirstTerminator(), VMP);
}
void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- Register VReg) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- int FI = getStackSlot(VReg);
- TII->loadRegFromStackSlot(MBB, InsertBefore, VReg, FI,
- RC, TRI, VReg);
+ VRegMaskPair VMP) {
+ unsigned SubRegIdx = 0;
+ const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
+ int FI = getStackSlot(VMP);
+ TII->loadRegFromStackSlot(MBB, InsertBefore, VMP.VReg, FI,
+ RC, TRI, VMP.VReg, SubRegIdx);
}
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
- Register VReg) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- int FI = assignVirt2StackSlot(VReg);
- TII->storeRegToStackSlot(MBB, InsertBefore, VReg, true,
- FI, RC, TRI, VReg);
+ VRegMaskPair VMP) {
+ unsigned SubRegIdx = 0;
+ const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
+
+ int FI = assignVirt2StackSlot(VMP);
+ TII->storeRegToStackSlot(MBB, InsertBefore, VMP.VReg, true, FI, RC, TRI,
+ VMP.VReg, SubRegIdx);
}
unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
@@ -486,7 +531,7 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
RegisterSet &ToSpill) {
T2->startTimer();
- Active.remove_if([&](Register R) { return NU.isDead(MBB, I, R); });
+ Active.remove_if([&](VRegMaskPair P) { return NU.isDead(MBB, I, P); });
unsigned CurRP = getSizeInRegs(Active);
if (CurRP <= Limit) {
@@ -496,25 +541,28 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
sortRegSetAt(MBB, I, Active);
+ // Here we expect that the furthest use is a use of %4:VReg_1024.sub_31, so
+ // its size is 32 bits.
+
while (CurRP > Limit) {
- auto R = Active.pop_back_val();
- unsigned RegSize = getSizeInRegs(R);
+ auto P = Active.pop_back_val();
+ unsigned RegSize = getSizeInRegs(P.VReg);
CurRP -= RegSize;
- if (!Spilled.contains(R))
- ToSpill.insert(R);
+ if (!Spilled.contains(P))
+ ToSpill.insert(P);
}
T2->stopTimer();
}
-unsigned AMDGPUSSASpiller::getSizeInRegs(const Register VReg) {
+unsigned AMDGPUSSASpiller::getSizeInRegs(const VRegMaskPair VMP) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
return TRI->getRegClassWeight(RC).RegWeight;
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const RegisterSet VRegs) {
unsigned Size = 0;
- for (auto VReg : VRegs) {
- Size += getSizeInRegs(VReg);
+ for (auto VMP : VRegs) {
+ Size += getSizeInRegs(VMP.VReg);
}
return Size;
}
@@ -525,10 +573,10 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
auto &Active = RegisterMap[MBB.getNumber()].ActiveSet;
unsigned Size = getSizeInRegs(Active);
sortRegSetAt(MBB, MBB.begin(), S);
- for (auto VReg : S) {
- unsigned RSize = getSizeInRegs(VReg);
+ for (auto VMP : S) {
+ unsigned RSize = getSizeInRegs(VMP.VReg);
if (Size + RSize < Limit) {
- Active.insert(VReg);
+ Active.insert(VMP);
Size += RSize;
}
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d43924d46b005..0dd178482a31d 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1694,8 +1694,7 @@ unsigned SIInstrInfo::getVectorRegSpillSaveOpcode(
void SIInstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ const TargetRegisterInfo *TRI, Register VReg, unsigned SubRegIdx) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
@@ -1741,11 +1740,11 @@ void SIInstrInfo::storeRegToStackSlot(
MFI->setHasSpilledVGPRs();
BuildMI(MBB, MI, DL, get(Opcode))
- .addReg(SrcReg, getKillRegState(isKill)) // data
- .addFrameIndex(FrameIndex) // addr
- .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
- .addImm(0) // offset
- .addMemOperand(MMO);
+ .addReg(SrcReg, getKillRegState(isKill), SubRegIdx) // data
+ .addFrameIndex(FrameIndex) // addr
+ .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
+ .addImm(0) // offset
+ .addMemOperand(MMO);
}
static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index fdbd9ce4a66bf..344498a982b87 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -284,29 +284,18 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
MachineBasicBlock::iterator I, const DebugLoc &DL,
Register SrcReg, int Value) const;
- bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg,
- int64_t &ImmVal) const override;
-
- unsigned getVectorRegSpillSaveOpcode(Register Reg,
- const TargetRegisterClass *RC,
- unsigned Size,
- const SIMachineFunctionInfo &MFI) const;
- unsigned
- getVectorRegSpillRestoreOpcode(Register Reg, const TargetRegisterClass *RC,
- unsigned Size,
- const SIMachineFunctionInfo &MFI) const;
-
- void storeRegToStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
-
- void loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, Register SrcReg,
+ bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI, Register VReg,
+ unsigned SubRegIdx = 0) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, Register DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI, Register VReg,
+ unsigned SubRegIdx = 0) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 9dc5f4b0e086e..07fe1c01e1f00 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -468,17 +468,18 @@ class X86InstrInfo final : public X86GenInstrInfo {
const DebugLoc &DL, Register DestReg, Register SrcReg,
bool KillSrc, bool RenamableDest = false,
bool RenamableSrc = false) const override;
- void storeRegToStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
- bool isKill, int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
-
- void loadRegFromStackSlot(
- MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
- int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags = MachineInstr::NoFlags) const override;
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, Register SrcReg,
+ bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI, Register VReg,
+ unsigned SubRegIdx = 0) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, Register DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI, Register VReg,
+ unsigned SubRegIdx = 0) const override;
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned Opc, Register Reg, int FrameIdx,
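
To make the sub-register tracking above easier to discuss, here is a simplified standalone model (plain C++, not the patch code) of the (virtual register, lane mask) pairs this patch introduces: a use through a sub-register records only the lanes it reads, and a definition kills an entry once all of its lanes are overwritten. Types and names are illustrative.

#include <cstdint>
#include <map>
#include <utility>

// (vreg id, lane mask) -> next-use distance, in the spirit of VRegDistances.
using VRegLanes = std::map<std::pair<unsigned, uint64_t>, unsigned>;

// A use of VReg through a sub-register contributes only the lanes it reads.
void recordUse(VRegLanes &Curr, unsigned VReg, uint64_t UseMask) {
  Curr[{VReg, UseMask}] = 0;
}

// A def of VReg removes every entry whose lanes are all overwritten by it;
// a whole-register def is simply an all-ones mask.
void recordDef(VRegLanes &Curr, unsigned VReg, uint64_t DefMask) {
  for (auto It = Curr.begin(); It != Curr.end();) {
    if (It->first.first == VReg && (It->first.second & ~DefMask) == 0)
      It = Curr.erase(It);
    else
      ++It;
  }
}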
>From 43ed0912255aca8f3871032a065cf26e3fc38287 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 9 Jan 2025 09:56:02 -0600
Subject: [PATCH 10/46] SSA Spiller. New algorithm that works with sub-regs.
 First edition.
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 38 +++-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 184 ++++++++++++++----
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 11 +-
llvm/lib/Target/X86/X86InstrInfo.cpp | 5 +-
4 files changed, 185 insertions(+), 53 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 60e6d112c9dda..fd0c61bc93125 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -132,9 +132,23 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const MachineBasicBlock *MBB = I->getParent();
unsigned MBBNum = MBB->getNumber();
if (NextUseMap.contains(MBBNum) &&
- NextUseMap[MBBNum].InstrDist.contains(&*I) &&
- NextUseMap[MBBNum].InstrDist[&*I].contains(VMP))
- Dist = NextUseMap[MBBNum].InstrDist[&*I][VMP];
+ NextUseMap[MBBNum].InstrDist.contains(&*I)) {
+ VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP)) {
+ Dist = Dists[VMP];
+ } else {
+ for (auto P : Dists) {
+ if (P.first.VReg == VMP.VReg) {
+ LaneBitmask UseMask = P.first.LaneMask;
+ LaneBitmask Mask = VMP.LaneMask;
+ if ((UseMask & Mask) == UseMask)
+ if (P.second < Dist)
+ Dist = P.second;
+ }
+ }
+ }
+ }
+
return Dist;
}
@@ -142,8 +156,22 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
unsigned MBBNum = MBB.getNumber();
- if (NextUseMap.contains(MBBNum))
- Dist = NextUseMap[MBBNum].Bottom[VMP];
+ if (NextUseMap.contains(MBBNum)) {
+ if (NextUseMap[MBBNum].Bottom.contains(VMP))
+ Dist = NextUseMap[MBBNum].Bottom[VMP];
+ else {
+ VRegDistances Dists = NextUseMap[MBBNum].Bottom;
+ for (auto P : Dists) {
+ if (P.first.VReg == VMP.VReg) {
+ LaneBitmask UseMask = P.first.LaneMask;
+ LaneBitmask Mask = VMP.LaneMask;
+ if ((UseMask & Mask) == UseMask)
+ if (P.second < Dist)
+ Dist = P.second;
+ }
+ }
+ }
+ }
return Dist;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 61b6bb6a40070..f7a39d507efbf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -21,13 +21,7 @@ using namespace llvm;
namespace {
- static void dumpRegSet(SetVector<Register> VRegs) {
- dbgs() << "\n";
- for (auto R : VRegs) {
- dbgs() << printReg(R) << " ";
- }
- dbgs() << "\n";
- }
+
class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
const LiveIntervals &LIS;
@@ -46,11 +40,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
- // TODO: HOW TO MAP VREG + LANEMASK TO SPILL SLOT ???
-
- // IF IT EVEN POSSIBLE TO SPILL REG.SUBREG ?
-
- // CREATE NEW PSEUDOS SI_SPILL_XXX_SAVE/RESTORE_WITH_SUBREG ???
+ LLVM_ATTRIBUTE_NOINLINE void
+ dumpRegSet(SetVector<VRegMaskPair> VMPs);
unsigned createSpillSlot(const TargetRegisterClass *RC) {
unsigned Size = TRI->getSpillSize(*RC);
@@ -95,17 +86,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<unsigned, unsigned> PostponedLoopLatches;
DenseMap<unsigned, SmallVector<unsigned>> LoopHeader2Latches;
- void printVRegMaskPair(const VRegMaskPair P) {
- SmallVector<unsigned> Idxs;
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
- bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs);
- dbgs() << "Vreg: ";
- if (HasSubReg)
- for (auto i : Idxs)
- dbgs() << printReg(P.VReg, TRI, i, MRI) << "]\n";
- else
- dbgs() << printReg(P.VReg) << "]\n";
- }
+ LLVM_ATTRIBUTE_NOINLINE void
+ printVRegMaskPair(const VRegMaskPair P);
void dump() {
for (auto SI : RegisterMap) {
@@ -192,6 +174,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity = 0);
+ bool isCoveredActive(VRegMaskPair VMP, const RegisterSet Active);
+
public:
AMDGPUSSASpiller() = default;
@@ -209,6 +193,28 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
bool run(MachineFunction &MF);
};
+LLVM_ATTRIBUTE_NOINLINE void
+AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
+ dbgs() << "\n";
+ for (auto P : VMPs) {
+ printVRegMaskPair(P);
+ }
+ dbgs() << "\n";
+}
+
+LLVM_ATTRIBUTE_NOINLINE void
+AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
+ bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs);
+ dbgs() << "Vreg: [";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ dbgs() << printReg(P.VReg, TRI, i, MRI) << "]\n";
+ else
+ dbgs() << printReg(P.VReg) << "]\n";
+}
+
AMDGPUSSASpiller::SpillInfo &
AMDGPUSSASpiller::getBlockInfo(const MachineBasicBlock &MBB) {
if (!RegisterMap.contains(MBB.getNumber()))
@@ -264,10 +270,14 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Register VReg = U.getReg();
if (!takeReg(VReg))
continue;
+
+ if (U.getSubReg() != AMDGPU::NoSubRegister) {
+ dbgs() << U << "\n";
+ }
VRegMaskPair VMP(U, *TRI);
- if (Active.insert(VMP)) {
+ if (!isCoveredActive(VMP, Active)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
@@ -290,11 +300,16 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
}
T4->stopTimer();
+ dumpRegSet(Active);
+
RegisterSet ToSpill;
limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
limit(MBB, Active, Spilled, std::next(I),
NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
T4->startTimer();
+
+ dumpRegSet(Active);
+
for (auto R : ToSpill) {
spillBefore(MBB, I, R);
Spilled.insert(R);
@@ -313,7 +328,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (NU.isDead(MBB, MBB.end(), R))
Deads.insert(R);
}
+ dumpRegSet(Deads);
+ dumpRegSet(Active);
Active.set_subtract(Deads);
+ dumpRegSet(Active);
}
void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
@@ -462,15 +480,13 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
const TargetRegisterClass *
AMDGPUSSASpiller::getRegClassForVregMaskPair(VRegMaskPair VMP,
unsigned &SubRegIdx) {
- const TargetRegisterClass *RC;
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
- if (VMP.LaneMask.all()) {
- RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
- } else {
+ if (!VMP.LaneMask.all()) {
SmallVector<unsigned> Idxs;
if (TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs)) {
SubRegIdx = Idxs[0];
- for (int i = 1; i < Idxs.size() - 1; i++)
+ for (unsigned i = 1; i < Idxs.size() - 1; i++)
SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
RC = TRI->getSubRegisterClass(RC, SubRegIdx);
}
@@ -540,29 +556,111 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
}
sortRegSetAt(MBB, I, Active);
+
+ dumpRegSet(Active);
// Here we expect that the furthest use is use of %4:VReg_1024.sub_31 so its
// size is 32bits
while (CurRP > Limit) {
auto P = Active.pop_back_val();
- unsigned RegSize = getSizeInRegs(P.VReg);
- CurRP -= RegSize;
- if (!Spilled.contains(P))
- ToSpill.insert(P);
+ unsigned RegSize = getSizeInRegs(P);
+ unsigned SizeToSpill = CurRP - Limit;
+ if (RegSize > SizeToSpill) {
+ const TargetRegisterClass *SuperRC = TRI->getRegClassForReg(*MRI, P.VReg);
+ DenseMap<unsigned, unsigned> Cands;
+ SmallVector<unsigned> Sorted;
+ for (auto &SubReg : MRI->reg_operands(P.VReg)) {
+ unsigned SubRegIdx = SubReg.getSubReg();
+ LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx);
+ if ((P.LaneMask & Mask) != LaneBitmask::getNone()) {
+ VRegMaskPair X(P.VReg, Mask);
+ unsigned D = I == MBB.end() ? NU.getNextUseDistance(MBB, X)
+ : NU.getNextUseDistance(I, X);
+ Cands[D] = SubRegIdx;
+ Sorted.push_back(D);
+ }
+ }
+
+ LaneBitmask ActiveMask = P.LaneMask;
+ std::sort(Sorted.begin(), Sorted.end(), [](unsigned x, unsigned y) { return x > y;});
+ for (auto i : Sorted) {
+ unsigned SubIdx = Cands[i];
+ LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(Cands[i]);
+ VRegMaskPair Y(P.VReg, SubMask);
+ dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i << "\n";
+ const TargetRegisterClass *RC =
+ TRI->getSubRegisterClass(SuperRC, SubIdx);
+ unsigned Size = TRI->getRegClassWeight(RC).RegWeight;
+ CurRP -= Size;
+ if (!Spilled.contains(Y))
+ ToSpill.insert(Y);
+ ActiveMask &= (~SubMask);
+ if (CurRP == Limit)
+ break;
+ }
+
+ if (ActiveMask.any()) {
+ VRegMaskPair Q(P.VReg, ActiveMask);
+ printVRegMaskPair(Q);
+ Active.insert(Q);
+ }
+
+
+ // const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
+ // SmallVector<unsigned> Idxs;
+ // if (TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs)) {
+ // // TODO: in SSA form each definition always defines the whole register.
+ // // So, in Active set we have a regiters with a full bit mask. Same time,
+ // // if the register uses are a uses of a subregs, we are interested in
+ // // spilling the furthest subreg use.
+ // /*
+ // %1:vreg_1024 <- def
+ // ***
+ // ***
+ // use of %1:vreg_1024.sub0 Dist x
+ // use of %1:vreg_1024.sub1
+ // use of %1:vreg_1024.sub2
+ // ***
+ // many instructions here
+ // ***
+ // use of %1:vreg_1024.sub31 Dist y
+
+ // We want to spill %1.sub31 as its use is the furthest one!
+ // For that we'd want to build a sorted vector of the subreg uses first.
+ // */
+ // unsigned SubRegIdx;
+ // unsigned i = 0;
+ // do {
+ // SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
+ // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ // SizeToSpill -= TRI->getRegClassWeight(RC).RegWeight;
+ // i++;
+ // } while (SizeToSpill != 0 && i < Idxs.size() - 1);
+ // LaneBitmask SpillMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
+ // P.LaneMask &= ~SpillMask;
+ // ToSpill.insert({P.VReg, SpillMask});
+ // Active.insert(P);
+ // }
+ } else {
+ CurRP -= RegSize;
+ if (!Spilled.contains(P))
+ ToSpill.insert(P);
+ }
}
T2->stopTimer();
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const VRegMaskPair VMP) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ unsigned SubRegIdx = 0;
+ const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
return TRI->getRegClassWeight(RC).RegWeight;
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const RegisterSet VRegs) {
unsigned Size = 0;
for (auto VMP : VRegs) {
- Size += getSizeInRegs(VMP.VReg);
+ Size += getSizeInRegs(VMP);
}
return Size;
}
@@ -574,7 +672,7 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Size = getSizeInRegs(Active);
sortRegSetAt(MBB, MBB.begin(), S);
for (auto VMP : S) {
- unsigned RSize = getSizeInRegs(VMP.VReg);
+ unsigned RSize = getSizeInRegs(VMP);
if (Size + RSize < Limit) {
Active.insert(VMP);
Size += RSize;
@@ -583,6 +681,18 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
return Size;
}
+bool AMDGPUSSASpiller::isCoveredActive(VRegMaskPair VMP,
+ const RegisterSet Active) {
+ printVRegMaskPair(VMP);
+ dumpRegSet(Active);
+ for (auto P : Active) {
+ if (P.VReg == VMP.VReg) {
+ return (P.LaneMask & VMP.LaneMask).any();
+ }
+ }
+ return false;
+}
+
bool AMDGPUSSASpiller::run(MachineFunction &MF) {
ST = &MF.getSubtarget<GCNSubtarget>();
MRI = &MF.getRegInfo();
@@ -590,8 +700,6 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
- Virt2StackSlotMap.resize(MRI->getNumVirtRegs());
-
init(MF, false);
processFunction(MF);
init(MF, true);
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 0dd178482a31d..c4815dfc0ffb0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1883,13 +1883,10 @@ unsigned SIInstrInfo::getVectorRegSpillRestoreOpcode(
return getVGPRSpillRestoreOpcode(Size);
}
-void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- Register DestReg, int FrameIndex,
- const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI,
- Register VReg,
- MachineInstr::MIFlag Flags) const {
+void SIInstrInfo::loadRegFromStackSlot(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
+ int FrameIndex, const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI, Register VReg, unsigned SubRegIdx) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index aa477efaed2ef..aefbe590bba35 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4784,8 +4784,7 @@ void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
void X86InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- MachineInstr::MIFlag Flags) const {
+ const TargetRegisterInfo *TRI, Register VReg, unsigned SubRegIdx) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
@@ -4808,7 +4807,7 @@ void X86InstrInfo::storeRegToStackSlot(
void X86InstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, MachineInstr::MIFlag Flags) const {
+ Register VReg, unsigned SubRegIdx) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
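Side note on the lane-mask handling that keeps recurring above (printVRegMaskPair, getRegClassForVregMaskPair): both turn a (VReg, LaneMask) pair into a sub-register index plus a register class to spill or print. A minimal sketch of that pattern, using the same TRI/MRI helpers the patch already calls; the helper name and the bail-out for multi-index covers are illustrative only, not part of the change:

// Sketch: map a VRegMaskPair to the register class a spill would use.
// Falls back to the full class when the mask is not a single sub-register.
static const TargetRegisterClass *
resolveSpillClass(const VRegMaskPair &VMP, const MachineRegisterInfo &MRI,
                  const SIRegisterInfo &TRI, unsigned &SubRegIdx) {
  const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, VMP.VReg);
  SubRegIdx = AMDGPU::NoSubRegister; // spill the whole register by default
  if (VMP.LaneMask.all())
    return RC;
  SmallVector<unsigned> Idxs;
  if (TRI.getCoveringSubRegIndexes(MRI, RC, VMP.LaneMask, Idxs) &&
      Idxs.size() == 1) {
    SubRegIdx = Idxs.front();
    return TRI.getSubRegisterClass(RC, SubRegIdx);
  }
  return RC; // multi-index cover: kept simple in this sketch
}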
>From b2df34a0868219d9e782a9bb4bcab5753f46710d Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 26 Dec 2024 12:17:47 -0600
Subject: [PATCH 11/46] Next Use Analysis and SSA Spiller must account for sub
registers. WIP 26.12.2024
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 58 +++++--------------
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 14 ++---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 36 ++++++++----
3 files changed, 46 insertions(+), 62 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index fd0c61bc93125..c2f34f0deacfc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -93,20 +93,16 @@ void NextUseResult::analyze(const MachineFunction &MF) {
VRegMaskPair P(MO, *TRI);
if(MO.isUse()) {
Curr[P] = 0;
- UsedInBlock[MBB->getNumber()].insert(P);
+ UsedInBlock[MBB->getNumber()].insert(P.VReg);
} else if (MO.isDef()) {
- SmallVector<VRegMaskPair> ToKill;
- for (auto X : Curr) {
- if (X.first.VReg == P.VReg) {
- X.first.LaneMask &= ~P.LaneMask;
- if (X.first.LaneMask.none())
- ToKill.push_back(X.first);
- }
- }
-
- for (auto D : ToKill) {
- Curr.erase(D);
+ SmallVector<VRegMaskPair> ToUpdate;
+ std::copy_if(Curr.begin(), Curr.end(), ToUpdate,
+ [&](VRegMaskPair X) { return X.VReg == P.VReg; });
+ for (auto &Y : ToUpdate) {
+ Y.LaneMask &= ~P.LaneMask;
+ if (Y.LaneMask.none())
+ Curr.erase(Y);
}
}
}
@@ -132,23 +128,9 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const MachineBasicBlock *MBB = I->getParent();
unsigned MBBNum = MBB->getNumber();
if (NextUseMap.contains(MBBNum) &&
- NextUseMap[MBBNum].InstrDist.contains(&*I)) {
- VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
- if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP)) {
- Dist = Dists[VMP];
- } else {
- for (auto P : Dists) {
- if (P.first.VReg == VMP.VReg) {
- LaneBitmask UseMask = P.first.LaneMask;
- LaneBitmask Mask = VMP.LaneMask;
- if ((UseMask & Mask) == UseMask)
- if (P.second < Dist)
- Dist = P.second;
- }
- }
- }
- }
-
+ NextUseMap[MBBNum].InstrDist.contains(&*I) &&
+ NextUseMap[MBBNum].InstrDist[&*I].contains(VMP))
+ Dist = NextUseMap[MBBNum].InstrDist[&*I][VMP];
return Dist;
}
@@ -156,22 +138,8 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
unsigned MBBNum = MBB.getNumber();
- if (NextUseMap.contains(MBBNum)) {
- if (NextUseMap[MBBNum].Bottom.contains(VMP))
- Dist = NextUseMap[MBBNum].Bottom[VMP];
- else {
- VRegDistances Dists = NextUseMap[MBBNum].Bottom;
- for (auto P : Dists) {
- if (P.first.VReg == VMP.VReg) {
- LaneBitmask UseMask = P.first.LaneMask;
- LaneBitmask Mask = VMP.LaneMask;
- if ((UseMask & Mask) == UseMask)
- if (P.second < Dist)
- Dist = P.second;
- }
- }
- }
- }
+ if (NextUseMap.contains(MBBNum))
+ Dist = NextUseMap[MBBNum].Bottom[VMP];
return Dist;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index d7a9ab81821e8..04ab93a684e28 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -48,16 +48,16 @@ struct VRegMaskPair {
}
};
-namespace llvm {
-template <> struct DenseMapInfo<VRegMaskPair> {
+template<>
+struct DenseMapInfo<VRegMaskPair> {
static inline VRegMaskPair getEmptyKey() {
return {Register(DenseMapInfo<unsigned>::getEmptyKey()),
LaneBitmask(0xFFFFFFFFFFFFFFFFULL)};
}
static inline VRegMaskPair getTombstoneKey() {
- return {Register(DenseMapInfo<unsigned>::getTombstoneKey()),
- LaneBitmask(0xFFFFFFFFFFFFFFFEULL)};
+ return { Register(DenseMapInfo<unsigned>::getTombstoneKey()),
+ LaneBitmask(0xFFFFFFFFFFFFFFFEULL) };
}
static unsigned getHashValue(const VRegMaskPair &P) {
@@ -71,7 +71,7 @@ template <> struct DenseMapInfo<VRegMaskPair> {
RHS.LaneMask.getAsInteger());
}
};
-} // namespace llvm
+
class NextUseResult {
friend class AMDGPUNextUseAnalysisWrapper;
SlotIndexes *Indexes;
@@ -109,7 +109,7 @@ class NextUseResult {
private:
- DenseMap<unsigned, SetVector<VRegMaskPair>> UsedInBlock;
+ DenseMap<unsigned, SetVector<Register>> UsedInBlock;
DenseMap<int, int> EdgeWeigths;
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
void init(const MachineFunction &MF);
@@ -215,7 +215,7 @@ class NextUseResult {
: getNextUseDistance(I, VMP) == Infinity;
}
- SetVector<VRegMaskPair> usedInBlock(MachineBasicBlock &MBB) {
+ SetVector<Register> usedInBlock(MachineBasicBlock &MBB) {
return std::move(UsedInBlock[MBB.getNumber()]);
}
};
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index f7a39d507efbf..1e6bb6e29e949 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -40,8 +40,11 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
- LLVM_ATTRIBUTE_NOINLINE void
- dumpRegSet(SetVector<VRegMaskPair> VMPs);
+ // TODO: HOW TO MAP VREG + LANEMASK TO SPILL SLOT ???
+
+ // IF IT EVEN POSSIBLE TO SPILL REG.SUBREG ?
+
+ // CREATE NEW PSEUDOS SI_SPILL_XXX_SAVE/RESTORE_WITH_SUBREG ???
unsigned createSpillSlot(const TargetRegisterClass *RC) {
unsigned Size = TRI->getSpillSize(*RC);
@@ -86,8 +89,17 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<unsigned, unsigned> PostponedLoopLatches;
DenseMap<unsigned, SmallVector<unsigned>> LoopHeader2Latches;
- LLVM_ATTRIBUTE_NOINLINE void
- printVRegMaskPair(const VRegMaskPair P);
+ void printVRegMaskPair(const VRegMaskPair P) {
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
+ bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs);
+ dbgs() << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ dbgs() << printReg(P.VReg, TRI, i, MRI) << "]\n";
+ else
+ dbgs() << printReg(P.VReg) << "]\n";
+ }
void dump() {
for (auto SI : RegisterMap) {
@@ -270,14 +282,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Register VReg = U.getReg();
if (!takeReg(VReg))
continue;
-
- if (U.getSubReg() != AMDGPU::NoSubRegister) {
- dbgs() << U << "\n";
- }
VRegMaskPair VMP(U, *TRI);
- if (!isCoveredActive(VMP, Active)) {
+ if (Active.insert(VMP)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
@@ -517,7 +525,15 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
unsigned SubRegIdx = 0;
- const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
+ SmallVector<unsigned> Idxs;
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
+ bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs);
+ if (HasSubReg) {
+ SubRegIdx = Idxs[0];
+ for (int i = 1; i < Idxs.size() - 1; i++)
+ SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
+ RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ }
int FI = assignVirt2StackSlot(VMP);
TII->storeRegToStackSlot(MBB, InsertBefore, VMP.VReg, true, FI, RC, TRI,
>From 46fcae28b3fc5de54bfb6b7cc87c9080ca86a541 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Wed, 15 Jan 2025 12:23:03 -0600
Subject: [PATCH 12/46] SSA Spiller. Spill candidates mapping minor
improvement. Sub regs reload fix.
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 13 +++++++------
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 5 +++--
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 1e6bb6e29e949..2008ab6fc2c23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -584,7 +584,7 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned SizeToSpill = CurRP - Limit;
if (RegSize > SizeToSpill) {
const TargetRegisterClass *SuperRC = TRI->getRegClassForReg(*MRI, P.VReg);
- DenseMap<unsigned, unsigned> Cands;
+ DenseMap<unsigned, std::pair<unsigned, LaneBitmask>> Cands;
SmallVector<unsigned> Sorted;
for (auto &SubReg : MRI->reg_operands(P.VReg)) {
unsigned SubRegIdx = SubReg.getSubReg();
@@ -593,7 +593,7 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
VRegMaskPair X(P.VReg, Mask);
unsigned D = I == MBB.end() ? NU.getNextUseDistance(MBB, X)
: NU.getNextUseDistance(I, X);
- Cands[D] = SubRegIdx;
+ Cands[D] = {SubRegIdx, Mask};
Sorted.push_back(D);
}
}
@@ -601,10 +601,11 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
LaneBitmask ActiveMask = P.LaneMask;
std::sort(Sorted.begin(), Sorted.end(), [](unsigned x, unsigned y) { return x > y;});
for (auto i : Sorted) {
- unsigned SubIdx = Cands[i];
- LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(Cands[i]);
+ unsigned SubIdx = Cands[i].first;
+ LaneBitmask SubMask = Cands[i].second;
VRegMaskPair Y(P.VReg, SubMask);
- dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i << "\n";
+ dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i
+ << "\n";
const TargetRegisterClass *RC =
TRI->getSubRegisterClass(SuperRC, SubIdx);
unsigned Size = TRI->getRegClassWeight(RC).RegWeight;
@@ -703,7 +704,7 @@ bool AMDGPUSSASpiller::isCoveredActive(VRegMaskPair VMP,
dumpRegSet(Active);
for (auto P : Active) {
if (P.VReg == VMP.VReg) {
- return (P.LaneMask & VMP.LaneMask).any();
+ return (P.LaneMask & VMP.LaneMask) == VMP.LaneMask;
}
}
return false;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index c4815dfc0ffb0..87d943668b7fe 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1925,8 +1925,9 @@ void SIInstrInfo::loadRegFromStackSlot(
}
unsigned Opcode = getVectorRegSpillRestoreOpcode(VReg ? VReg : DestReg, RC,
- SpillSize, *MFI);
- BuildMI(MBB, MI, DL, get(Opcode), DestReg)
+ SpillSize, RI, *MFI);
+ BuildMI(MBB, MI, DL, get(Opcode))
+ .addReg(DestReg, 0, SubRegIdx)
.addFrameIndex(FrameIndex) // vaddr
.addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
.addImm(0) // offset
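The isCoveredActive change in this patch tightens the check from "some requested lane overlaps the Active entry" to "every requested lane is already active". A tiny illustration with made-up lane-mask values (not tied to a real register class):

// Suppose %5 is tracked in Active with only its low lanes resident.
LaneBitmask ActiveMask(0x00FF); // lanes of %5 currently in registers
LaneBitmask WantLow(0x000F);    // request for a low sub-register of %5
LaneBitmask WantAll(0xFFFF);    // request for the whole of %5

bool CoveredLow = (ActiveMask & WantLow) == WantLow; // true: no reload needed
bool CoveredAll = (ActiveMask & WantAll) == WantAll; // false: must reload
bool OverlapAll = (ActiveMask & WantAll).any();      // true: the old test, too weak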
>From 070dc1d22850afed40c228fe5fcf3dec464498d8 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 17 Jan 2025 13:26:18 -0600
Subject: [PATCH 13/46] SSA Spiller. Next Use Analysis: VRegDistances redesign.
1st buildable. WIP 20.01.25
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 52 +++--
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 198 ++++++++++++------
2 files changed, 169 insertions(+), 81 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index c2f34f0deacfc..15ba3fcf6d33d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -76,7 +76,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
if (Succ->getNumber() == SuccNum)
Weight = Infinity;
}
- mergeDistances(Curr, SuccDist, Weight);
+ Curr.merge(SuccDist, Weight);
}
}
@@ -85,25 +85,18 @@ void NextUseResult::analyze(const MachineFunction &MF) {
for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
for (auto &P : Curr) {
- P.second++;
+ for (auto D : P.second)
+ D.second++;
}
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.getReg().isVirtual()) {
VRegMaskPair P(MO, *TRI);
if(MO.isUse()) {
- Curr[P] = 0;
- UsedInBlock[MBB->getNumber()].insert(P.VReg);
+ Curr.insert(P, 0);
+ UsedInBlock[MBB->getNumber()].insert(P);
} else if (MO.isDef()) {
-
- SmallVector<VRegMaskPair> ToUpdate;
- std::copy_if(Curr.begin(), Curr.end(), ToUpdate,
- [&](VRegMaskPair X) { return X.VReg == P.VReg; });
- for (auto &Y : ToUpdate) {
- Y.LaneMask &= ~P.LaneMask;
- if (Y.LaneMask.none())
- Curr.erase(Y);
- }
+ Curr.clear(P);
}
}
}
@@ -113,7 +106,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
UpwardNextUses[MBBNum] = std::move(Curr);
- bool Changed4MBB = diff(Prev, UpwardNextUses[MBBNum]);
+ bool Changed4MBB = (Prev != UpwardNextUses[MBBNum]);
Changed |= Changed4MBB;
}
@@ -122,15 +115,32 @@ void NextUseResult::analyze(const MachineFunction &MF) {
TG->print(llvm::errs());
}
+void NextUseResult::getFromSortedRecords(
+ const VRegDistances::SortedRecords Dists, LaneBitmask Mask, unsigned &D) {
+ for (auto P : Dists) {
+ // Records are sorted in distance increasing order. So, the first record
+ // is for the closest use.
+ LaneBitmask UseMask = P.first;
+ if ((UseMask & Mask) == UseMask) {
+ D = P.second;
+ break;
+ }
+ }
+}
+
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
const MachineBasicBlock *MBB = I->getParent();
unsigned MBBNum = MBB->getNumber();
if (NextUseMap.contains(MBBNum) &&
- NextUseMap[MBBNum].InstrDist.contains(&*I) &&
- NextUseMap[MBBNum].InstrDist[&*I].contains(VMP))
- Dist = NextUseMap[MBBNum].InstrDist[&*I][VMP];
+ NextUseMap[MBBNum].InstrDist.contains(&*I)) {
+ VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
+ getFromSortedRecords(Dists[VMP.VReg], VMP.LaneMask, Dist);
+ }
+ }
+
return Dist;
}
@@ -138,8 +148,12 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
unsigned MBBNum = MBB.getNumber();
- if (NextUseMap.contains(MBBNum))
- Dist = NextUseMap[MBBNum].Bottom[VMP];
+ if (NextUseMap.contains(MBBNum)) {
+ if (NextUseMap[MBBNum].Bottom.contains(VMP.VReg)) {
+ getFromSortedRecords(NextUseMap[MBBNum].Bottom[VMP.VReg], VMP.LaneMask,
+ Dist);
+ }
+ }
return Dist;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 04ab93a684e28..7c6702f9ea501 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -17,7 +17,9 @@
#include "SIRegisterInfo.h"
#include "GCNSubtarget.h"
+#include <algorithm>
#include <limits>
+#include <set>
using namespace llvm;
@@ -83,7 +85,127 @@ class NextUseResult {
Timer *T1;
Timer *T2;
- using VRegDistances = DenseMap<VRegMaskPair, unsigned>;
+ class VRegDistances {
+
+ using Record = std::pair<LaneBitmask, unsigned>;
+ struct CompareByDist {
+ bool operator()(const Record &LHS, const Record &RHS) const {
+ return LHS.second < RHS.second;
+ };
+ };
+
+public:
+ using SortedRecords = std::set<Record, CompareByDist>;
+ private:
+ DenseMap<unsigned, SortedRecords> NextUseMap;
+
+ public:
+ auto begin() { return NextUseMap.begin(); }
+ auto end() { return NextUseMap.end(); }
+
+ auto begin() const { return NextUseMap.begin(); }
+ auto end() const { return NextUseMap.end(); }
+
+ size_t size() { return NextUseMap.size(); }
+ std::pair<bool, SortedRecords> get(unsigned Key) {
+ if (NextUseMap.contains(Key))
+ return {true, NextUseMap.find(Key)->second};
+ return {false, SortedRecords()};
+ }
+
+ SortedRecords operator[] (unsigned Key) {
+ return NextUseMap[Key];
+ }
+
+ SmallVector<unsigned> keys() {
+ SmallVector<unsigned> Keys;
+ for (auto P : NextUseMap)
+ Keys.push_back(P.first);
+ return std::move(Keys);
+ }
+
+ bool contains(unsigned Key) {
+ return NextUseMap.contains(Key);
+ }
+
+ bool insert(VRegMaskPair VMP, unsigned Dist) {
+ SortedRecords &Dists = NextUseMap[VMP.VReg];
+ return Dists.insert({VMP.LaneMask, Dist}).second;
+ }
+
+ void clear(VRegMaskPair VMP) {
+ if (NextUseMap.contains(VMP.VReg)) {
+ auto &Dists = NextUseMap[VMP.VReg];
+ std::erase_if(Dists,
+ [&](Record R) { return (R.first &= ~VMP.LaneMask).none(); });
+ }
+ }
+
+ bool operator == (VRegDistances Other) {
+
+ if (Other.size() != size())
+ return false;
+
+ for (auto P : NextUseMap) {
+
+ std::pair<bool, SortedRecords> OtherDists = Other.get(P.getFirst());
+ if (!OtherDists.first)
+ return false;
+ SortedRecords &Dists = P.getSecond();
+
+ if (Dists.size() != OtherDists.second.size())
+ return false;
+
+ for (auto R : OtherDists.second) {
+ SortedRecords::iterator I = Dists.find(R);
+ if (I == Dists.end())
+ return false;
+ if (R.second != I->second)
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool operator != (VRegDistances Other) {
+ return !operator == (Other);
+ }
+
+ void merge(VRegDistances Other, unsigned Weight = 0) {
+ for (auto P : Other) {
+ unsigned Key = P.getFirst();
+ auto Dists = P.getSecond();
+ if (NextUseMap.contains(Key)) {
+ auto &MineDists = NextUseMap[Key];
+ // Merge it!
+ for (auto D : Dists) {
+ auto It = MineDists.find(D);
+ if (It == MineDists.end()) {
+ // Not found! We have a subreg use to merge in.
+ for (auto D1 : MineDists) {
+ if (D1.first == D.first && D1.second > D.second + Weight) {
+ // We have a closer use of the same reg and mask.
+ // Erase and insert new to keep it properly sorted.
+ MineDists.erase(D1);
+ MineDists.insert({D.first, D.second + Weight});
+ break;
+ }
+ }
+ // Just add a new one.
+ MineDists.insert(*It);
+ }
+ }
+ } else {
+ // Just add it!
+ if (Weight)
+ for (auto D : Dists)
+ D.second += Weight;
+ NextUseMap[Key] = Dists;
+ }
+ }
+ }
+ };
class NextUseInfo {
// FIXME: need to elaborate proper class interface!
public:
@@ -114,18 +236,6 @@ class NextUseResult {
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
void init(const MachineFunction &MF);
void analyze(const MachineFunction &MF);
- bool diff(const VRegDistances &LHS, const VRegDistances &RHS) {
- for (auto P : LHS) {
- if (!RHS.contains(P.getFirst()) ||
- RHS.lookup(P.getFirst()) != P.getSecond())
- return true;
- }
- for (auto P : RHS) {
- if (!LHS.contains(P.getFirst()))
- return true;
- }
- return false;
- }
void printVregDistances(const VRegDistances &D,
raw_ostream &O = dbgs()) const {
@@ -133,57 +243,19 @@ class NextUseResult {
for (auto P : D) {
SmallVector<unsigned> Idxs;
const TargetRegisterClass *RC =
- TRI->getRegClassForReg(*MRI, P.first.VReg);
- bool HasSubReg =
- TRI->getCoveringSubRegIndexes(*MRI, RC, P.first.LaneMask, Idxs);
- O << "Vreg: ";
- if (HasSubReg)
- for (auto i : Idxs)
- O << printReg(P.first.VReg, TRI, i, MRI) << "[ " << P.second << "]\n";
- else
- O << printReg(P.first.VReg) << "[ " << P.second << "]\n";
- }
- }
-
- void printVregDistancesD(const VRegDistances &D) const {
- dbgs() << "\n";
- for (auto P : D) {
- SmallVector<unsigned> Idxs;
- const TargetRegisterClass *RC =
- TRI->getRegClassForReg(*MRI, P.first.VReg);
- bool HasSubReg =
- TRI->getCoveringSubRegIndexes(*MRI, RC, P.first.LaneMask, Idxs);
- dbgs() << "Vreg: ";
- if (HasSubReg)
- for (auto i : Idxs)
- dbgs() << printReg(P.first.VReg, TRI, i, MRI) << "[ " << P.second
- << "]\n";
- else
- dbgs() << printReg(P.first.VReg) << "[ " << P.second << "]\n";
- }
- }
-
- // void dump(raw_ostream &O = dbgs()) const {
- // for (auto P : NextUseMap) {
- // O << "\nMBB_" << P.first << "\n";
- // printVregDistances(P.second, O);
- // }
- // }
-
- VRegDistances &mergeDistances(VRegDistances &LHS, const VRegDistances &RHS,
- unsigned Weight = 0) {
- for (auto Pair : LHS) {
- VRegMaskPair VRegMP = Pair.getFirst();
- if (RHS.contains(VRegMP)) {
- LHS[VRegMP] = std::min(Pair.getSecond(), RHS.lookup(VRegMP) + Weight);
+ TRI->getRegClassForReg(*MRI, P.first);
+ for (auto X : P.second) {
+ bool HasSubReg =
+ TRI->getCoveringSubRegIndexes(*MRI, RC, X.first, Idxs);
+ O << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ O << printReg(P.first, TRI, i, MRI) << "[ " << X.second
+ << "]\n";
+ else
+ O << printReg(P.first) << "[ " << X.second << "]\n";
}
}
- for (auto Pair : RHS) {
- if (LHS.contains(Pair.getFirst()))
- continue;
- LHS[Pair.getFirst()] = Pair.getSecond() + Weight;
- }
- return LHS;
}
void clear() {
@@ -206,6 +278,8 @@ class NextUseResult {
const VRegMaskPair VMP);
unsigned getNextUseDistance(const MachineBasicBlock::iterator I,
const VRegMaskPair VMP);
+ void getFromSortedRecords(const VRegDistances::SortedRecords Dists,
+ LaneBitmask Mask, unsigned &D);
bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const VRegMaskPair VMP) {
>From fe021ba0d4f9c3997df6f3c80820402248bd0881 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 17 Jan 2025 13:26:18 -0600
Subject: [PATCH 14/46] SSA Spiller. Next Use Analysis: VRegDistances redesign.
WIP 17.01.25
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 18 ++++++++++++++++--
llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 17 +++++++----------
2 files changed, 23 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 15ba3fcf6d33d..63ba8ebe3d1c2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -136,8 +136,22 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
if (NextUseMap.contains(MBBNum) &&
NextUseMap[MBBNum].InstrDist.contains(&*I)) {
VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
- if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
- getFromSortedRecords(Dists[VMP.VReg], VMP.LaneMask, Dist);
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP)) {
+ // FIXME: This is not correct. The nearest use is the use of ANY
+ // subregister of a register. Hence, if we have an exact match of a
+ // register and a mask, it might happen that we miss another use that is
+ // closer but has a narrower mask (i.e. a subregister use)!
+ Dist = Dists[VMP];
+ } else {
+ for (auto P : Dists) {
+ if (P.first.VReg == VMP.VReg) {
+ LaneBitmask UseMask = P.first.LaneMask;
+ LaneBitmask Mask = VMP.LaneMask;
+ if ((UseMask & Mask) == UseMask)
+ if (P.second < Dist)
+ Dist = P.second;
+ }
+ }
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 7c6702f9ea501..47553f93273fe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -89,23 +89,19 @@ class NextUseResult {
using Record = std::pair<LaneBitmask, unsigned>;
struct CompareByDist {
- bool operator()(const Record &LHS, const Record &RHS) const {
- return LHS.second < RHS.second;
+ bool operator()(const Record &LHS, const Record &RHS) {
+ return LHS.second > RHS.second;
};
};
-public:
using SortedRecords = std::set<Record, CompareByDist>;
- private:
+
DenseMap<unsigned, SortedRecords> NextUseMap;
public:
auto begin() { return NextUseMap.begin(); }
auto end() { return NextUseMap.end(); }
- auto begin() const { return NextUseMap.begin(); }
- auto end() const { return NextUseMap.end(); }
-
size_t size() { return NextUseMap.size(); }
std::pair<bool, SortedRecords> get(unsigned Key) {
if (NextUseMap.contains(Key))
@@ -133,10 +129,10 @@ class NextUseResult {
return Dists.insert({VMP.LaneMask, Dist}).second;
}
- void clear(VRegMaskPair VMP) {
+ bool clear(VRegMaskPair VMP) {
if (NextUseMap.contains(VMP.VReg)) {
auto &Dists = NextUseMap[VMP.VReg];
- std::erase_if(Dists,
+ remove_if(Dists,
[&](Record R) { return (R.first &= ~VMP.LaneMask).none(); });
}
}
@@ -147,7 +143,8 @@ class NextUseResult {
return false;
for (auto P : NextUseMap) {
-
+ unsigned Key = P.getFirst();
+
std::pair<bool, SortedRecords> OtherDists = Other.get(P.getFirst());
if (!OtherDists.first)
return false;
>From 680f9ea96fb492e12ea2a2cb9768e1320d7430c3 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 23 Jan 2025 15:12:00 -0600
Subject: [PATCH 15/46] SSA Spiller. Next Use Analysis: VRegDistances redesign.
1st working. WIP 23.01.25
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 38 +++---
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 117 +++++++++++-------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 113 +++++++----------
3 files changed, 132 insertions(+), 136 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 63ba8ebe3d1c2..a75f976ed82f4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -63,10 +63,14 @@ void NextUseResult::analyze(const MachineFunction &MF) {
Prev = UpwardNextUses[MBBNum];
}
+ LLVM_DEBUG(dbgs() << "\nMerging successors for " << MBB->getName()
+ << "\n";);
for (auto Succ : successors(MBB)) {
unsigned SuccNum = Succ->getNumber();
+ LLVM_DEBUG(dbgs() << "Merging " << Succ->getName() << "\n");
+
if (UpwardNextUses.contains(SuccNum)) {
VRegDistances SuccDist = UpwardNextUses[SuccNum];
// Check if the edge from MBB to Succ goes out of the Loop
@@ -76,7 +80,14 @@ void NextUseResult::analyze(const MachineFunction &MF) {
if (Succ->getNumber() == SuccNum)
Weight = Infinity;
}
+ LLVM_DEBUG(
+ dbgs() << "Curr: ";
+ printVregDistances(Curr);
+ dbgs() << "Succ: ";
+ printVregDistances(SuccDist));
Curr.merge(SuccDist, Weight);
+ LLVM_DEBUG(dbgs() << "Curr after merge: ";
+ printVregDistances(Curr));
}
}
@@ -85,8 +96,10 @@ void NextUseResult::analyze(const MachineFunction &MF) {
for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
for (auto &P : Curr) {
+ VRegDistances::SortedRecords Tmp;
for (auto D : P.second)
- D.second++;
+ Tmp.insert({D.first, ++D.second});
+ P.second = Tmp;
}
for (auto &MO : MI.operands()) {
@@ -101,7 +114,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
}
NextUseMap[MBBNum].InstrDist[&MI] = Curr;
- // printVregDistancesD(Curr);
+ // printVregDistances(Curr);
}
UpwardNextUses[MBBNum] = std::move(Curr);
@@ -117,10 +130,12 @@ void NextUseResult::analyze(const MachineFunction &MF) {
void NextUseResult::getFromSortedRecords(
const VRegDistances::SortedRecords Dists, LaneBitmask Mask, unsigned &D) {
+ LLVM_DEBUG(dbgs() << "Mask : [" << PrintLaneMask(Mask) <<"]\n");
for (auto P : Dists) {
// Records are sorted in distance increasing order. So, the first record
// is for the closest use.
LaneBitmask UseMask = P.first;
+ LLVM_DEBUG(dbgs() << "Used mask : [" << PrintLaneMask(UseMask) << "]\n");
if ((UseMask & Mask) == UseMask) {
D = P.second;
break;
@@ -136,22 +151,9 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
if (NextUseMap.contains(MBBNum) &&
NextUseMap[MBBNum].InstrDist.contains(&*I)) {
VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
- if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP)) {
- // FIXME: This is not correct. The nearest use is the use of ANY
- // subregister of a register. Hence, if we have an exact match of a
- // register and a mask, it might happen that we miss another use that is
- // closer but has a narrower mask (i.e. a subregister use)!
- Dist = Dists[VMP];
- } else {
- for (auto P : Dists) {
- if (P.first.VReg == VMP.VReg) {
- LaneBitmask UseMask = P.first.LaneMask;
- LaneBitmask Mask = VMP.LaneMask;
- if ((UseMask & Mask) == UseMask)
- if (P.second < Dist)
- Dist = P.second;
- }
- }
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
+ // printSortedRecords(Dists[VMP.VReg], VMP.VReg);
+ getFromSortedRecords(Dists[VMP.VReg], VMP.LaneMask, Dist);
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 47553f93273fe..78dcce6f3c4b7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -102,8 +102,11 @@ class NextUseResult {
auto begin() { return NextUseMap.begin(); }
auto end() { return NextUseMap.end(); }
- size_t size() { return NextUseMap.size(); }
- std::pair<bool, SortedRecords> get(unsigned Key) {
+ auto begin() const { return NextUseMap.begin(); }
+ auto end() const { return NextUseMap.end(); }
+
+ size_t size() const { return NextUseMap.size(); }
+ std::pair<bool, SortedRecords> get(unsigned Key) const {
if (NextUseMap.contains(Key))
return {true, NextUseMap.find(Key)->second};
return {false, SortedRecords()};
@@ -125,8 +128,30 @@ class NextUseResult {
}
bool insert(VRegMaskPair VMP, unsigned Dist) {
- SortedRecords &Dists = NextUseMap[VMP.VReg];
- return Dists.insert({VMP.LaneMask, Dist}).second;
+ Record R(VMP.LaneMask, Dist);
+ if (NextUseMap.contains(VMP.VReg)) {
+ SortedRecords &Dists = NextUseMap[VMP.VReg];
+
+ if (!Dists.contains(R)) {
+ for (auto D : Dists) {
+ if (D.first == R.first) {
+ if (D.second > R.second) {
+ // Change to record with less distance
+ Dists.erase(D);
+ return Dists.insert(R).second;
+ } else {
+ return false;
+ }
+ }
+ }
+ // add new record
+ return Dists.insert(R).second;
+ } else {
+ // record already exists!
+ return false;
+ }
+ } else
+ return NextUseMap[VMP.VReg].insert(R).second;
}
bool clear(VRegMaskPair VMP) {
@@ -134,10 +159,12 @@ class NextUseResult {
auto &Dists = NextUseMap[VMP.VReg];
remove_if(Dists,
[&](Record R) { return (R.first &= ~VMP.LaneMask).none(); });
+ if (Dists.empty())
+ NextUseMap.erase(VMP.VReg);
}
}
- bool operator == (VRegDistances Other) {
+ bool operator == (const VRegDistances Other) const {
if (Other.size() != size())
return false;
@@ -165,40 +192,43 @@ class NextUseResult {
return true;
}
- bool operator != (VRegDistances Other) {
- return !operator == (Other);
+ bool operator!=(const VRegDistances &Other) const {
+ return !operator==(Other);
}
- void merge(VRegDistances Other, unsigned Weight = 0) {
+ void merge(const VRegDistances &Other, unsigned Weight = 0) {
for (auto P : Other) {
unsigned Key = P.getFirst();
auto Dists = P.getSecond();
+
if (NextUseMap.contains(Key)) {
auto &MineDists = NextUseMap[Key];
// Merge it!
for (auto D : Dists) {
- auto It = MineDists.find(D);
- if (It == MineDists.end()) {
- // Not found! We have a subreg use to merge in.
+ if (!MineDists.contains(D)) {
+ // We have a subreg use to merge in.
+ bool Exists = false;
for (auto D1 : MineDists) {
- if (D1.first == D.first && D1.second > D.second + Weight) {
- // We have a closer use of the same reg and mask.
- // Erase and insert new to keep it properly sorted.
- MineDists.erase(D1);
- MineDists.insert({D.first, D.second + Weight});
+ if (D1.first == D.first) {
+ Exists = true;
+ if (D1.second > D.second + Weight) {
+ // We have a closer use of the same reg and mask.
+ // Erase the existing one.
+ MineDists.erase(D1);
+ MineDists.insert({D.first, D.second + Weight});
+ }
break;
}
}
- // Just add a new one.
- MineDists.insert(*It);
+ if (!Exists)
+ // Insert a new one.
+ MineDists.insert({D.first, D.second + Weight});
}
}
} else {
// Just add it!
- if (Weight)
- for (auto D : Dists)
- D.second += Weight;
- NextUseMap[Key] = Dists;
+ for (auto D : Dists)
+ NextUseMap[Key].insert({D.first, D.second + Weight});
}
}
}
@@ -210,18 +240,6 @@ class NextUseResult {
DenseMap<const MachineInstr *, VRegDistances> InstrDist;
};
- // VRegMaskPair getFromOperand(const MachineOperand &MO) {
- // assert(MO.isReg() && "Not a register operand!");
- // Register R = MO.getReg();
- // assert(R.isVirtual() && "Not a virtual register!");
- // LaneBitmask Mask = LaneBitmask::getAll();
- // unsigned subRegIndex = MO.getSubReg();
- // if (subRegIndex) {
- // Mask = TRI->getSubRegIndexLaneMask(subRegIndex);
- // }
- // return {R, Mask};
- // }
-
DenseMap<unsigned, NextUseInfo> NextUseMap;
public:
@@ -233,25 +251,28 @@ class NextUseResult {
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
void init(const MachineFunction &MF);
void analyze(const MachineFunction &MF);
+ LLVM_ATTRIBUTE_NOINLINE void
+ printSortedRecords(VRegDistances::SortedRecords Records, unsigned VReg,
+ raw_ostream &O = dbgs()) const {
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ for (auto X : Records) {
+ SmallVector<unsigned> Idxs;
+ bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, X.first, Idxs);
+ O << "Vreg: ";
+ if (HasSubReg)
+ for (auto i : Idxs)
+ O << printReg(VReg, TRI, i, MRI) << "[ " << X.second << "]\n";
+ else
+ O << printReg(VReg) << "[ " << X.second << "]\n";
+ }
+ }
+ LLVM_ATTRIBUTE_NOINLINE
void printVregDistances(const VRegDistances &D,
raw_ostream &O = dbgs()) const {
O << "\n";
for (auto P : D) {
- SmallVector<unsigned> Idxs;
- const TargetRegisterClass *RC =
- TRI->getRegClassForReg(*MRI, P.first);
- for (auto X : P.second) {
- bool HasSubReg =
- TRI->getCoveringSubRegIndexes(*MRI, RC, X.first, Idxs);
- O << "Vreg: ";
- if (HasSubReg)
- for (auto i : Idxs)
- O << printReg(P.first, TRI, i, MRI) << "[ " << X.second
- << "]\n";
- else
- O << printReg(P.first) << "[ " << X.second << "]\n";
- }
+ printSortedRecords(P.second, P.first);
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 2008ab6fc2c23..de88d6979d3ab 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -119,11 +119,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void init(MachineFunction &MF, bool IsVGPRs) {
IsVGPRsPass = IsVGPRs;
- TG = new TimerGroup("SSA SPiller Timing", "Time Spent in different parts of the SSA Spiller");
- T1 = new Timer("General time", "ProcessFunction", *TG);
- T2 = new Timer("Limit", "Time spent in limit()", *TG);
- T3 = new Timer("Initialization time", "Init Active Sets", *TG);
- T4 = new Timer("Instruction processing time", "Process Instruction w/o limit", *TG);
+
NumAvailableRegs =
IsVGPRsPass ? ST->getMaxNumVGPRs(MF) : ST->getMaxNumSGPRs(MF);
@@ -193,14 +189,23 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
AMDGPUSSASpiller(const LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
- : LIS(LIS), LI(LI), MDT(MDT), NU(NU),
- NumSpillSlots(0), Virt2StackSlotMap(NO_STACK_SLOT) {}
+ : LIS(LIS), LI(LI), MDT(MDT), NU(NU), NumSpillSlots(0) //,
+ // Virt2StackSlotMap(NO_STACK_SLOT) {
+ {
+ TG = new TimerGroup("SSA SPiller Timing",
+ "Time Spent in different parts of the SSA Spiller");
+ T1 = new Timer("General time", "ProcessFunction", *TG);
+ T2 = new Timer("Limit", "Time spent in limit()", *TG);
+ T3 = new Timer("Initialization time", "Init Active Sets", *TG);
+ T4 = new Timer("Instruction processing time",
+ "Process Instruction w/o limit", *TG);
+ }
~AMDGPUSSASpiller() {
delete TG;
delete T2;
delete T3;
delete T4;
- // delete TG;
+ //delete TG;
}
bool run(MachineFunction &MF);
};
@@ -237,17 +242,17 @@ AMDGPUSSASpiller::getBlockInfo(const MachineBasicBlock &MBB) {
void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
- T1->startTimer();
+ // T1->startTimer();
for (auto MBB : RPOT) {
- T3->startTimer();
+ // T3->startTimer();
if (LI.isLoopHeader(MBB)) {
initActiveSetLoopHeader(*MBB);
} else {
initActiveSetUsualBlock(*MBB);
}
connectToPredecessors(*MBB);
- T3->stopTimer();
+ // T3->stopTimer();
processBlock(*MBB);
// dump();
// We process loop blocks twice: once with Spill/Active sets of
@@ -263,7 +268,7 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
PostponedLoopLatches.erase(MBB->getNumber());
}
}
- T1->stopTimer();
+ // T1->stopTimer();
}
void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
@@ -273,7 +278,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (MachineBasicBlock::iterator I : MBB) {
RegisterSet Reloads;
- T4->startTimer();
+ // T4->startTimer();
for (auto U : I->uses()) {
if (!U.isReg())
continue;
@@ -282,6 +287,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
Register VReg = U.getReg();
if (!takeReg(VReg))
continue;
+
+ // if (U.getSubReg() != AMDGPU::NoSubRegister) {
+ // dbgs() << U << "\n";
+ // }
VRegMaskPair VMP(U, *TRI);
@@ -303,20 +312,20 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
}
if (Reloads.empty() && Defs.empty()) {
- T4->stopTimer();
+ // T4->stopTimer();
continue;
}
- T4->stopTimer();
+ // T4->stopTimer();
- dumpRegSet(Active);
+ // dumpRegSet(Active);
RegisterSet ToSpill;
limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
limit(MBB, Active, Spilled, std::next(I),
NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
- T4->startTimer();
+ // T4->startTimer();
- dumpRegSet(Active);
+ // dumpRegSet(Active);
for (auto R : ToSpill) {
spillBefore(MBB, I, R);
@@ -328,7 +337,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
// Add reloads for VRegs in Reloads before I
for (auto R : Reloads)
reloadBefore(MBB, I, R);
- T4->stopTimer();
+ // T4->stopTimer();
}
// Now, clear dead registers.
RegisterSet Deads;
@@ -336,10 +345,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (NU.isDead(MBB, MBB.end(), R))
Deads.insert(R);
}
- dumpRegSet(Deads);
- dumpRegSet(Active);
+ // dumpRegSet(Deads);
+ // dumpRegSet(Active);
Active.set_subtract(Deads);
- dumpRegSet(Active);
+ // dumpRegSet(Active);
}
void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
@@ -562,12 +571,12 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
MachineBasicBlock::iterator I, unsigned Limit,
RegisterSet &ToSpill) {
- T2->startTimer();
+ // T2->startTimer();
Active.remove_if([&](VRegMaskPair P) { return NU.isDead(MBB, I, P); });
unsigned CurRP = getSizeInRegs(Active);
if (CurRP <= Limit) {
- T2->stopTimer();
+ // T2->stopTimer();
return;
}
@@ -575,9 +584,6 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
dumpRegSet(Active);
- // Here we expect that the furthest use is use of %4:VReg_1024.sub_31 so its
- // size is 32bits
-
while (CurRP > Limit) {
auto P = Active.pop_back_val();
unsigned RegSize = getSizeInRegs(P);
@@ -587,6 +593,8 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
DenseMap<unsigned, std::pair<unsigned, LaneBitmask>> Cands;
SmallVector<unsigned> Sorted;
for (auto &SubReg : MRI->reg_operands(P.VReg)) {
+ if (!SubReg.isUse())
+ continue;
unsigned SubRegIdx = SubReg.getSubReg();
LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx);
if ((P.LaneMask & Mask) != LaneBitmask::getNone()) {
@@ -604,8 +612,8 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned SubIdx = Cands[i].first;
LaneBitmask SubMask = Cands[i].second;
VRegMaskPair Y(P.VReg, SubMask);
- dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i
- << "\n";
+ // dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i
+ // << "\n";
const TargetRegisterClass *RC =
TRI->getSubRegisterClass(SuperRC, SubIdx);
unsigned Size = TRI->getRegClassWeight(RC).RegWeight;
@@ -619,53 +627,17 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
if (ActiveMask.any()) {
VRegMaskPair Q(P.VReg, ActiveMask);
- printVRegMaskPair(Q);
+ // printVRegMaskPair(Q);
Active.insert(Q);
}
-
- // const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
- // SmallVector<unsigned> Idxs;
- // if (TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs)) {
- // // TODO: in SSA form each definition always defines the whole register.
- // // So, in the Active set we have registers with a full bit mask. At the same time,
- // // if the register uses are uses of subregs, we are interested in
- // // spilling the furthest subreg use.
- // /*
- // %1:vreg_1024 <- def
- // ***
- // ***
- // use of %1:vreg_1024.sub0 Dist x
- // use of %1:vreg_1024.sub1
- // use of %1:vreg_1024.sub2
- // ***
- // many instructions here
- // ***
- // use of %1:vreg_1024.sub31 Dist y
-
- // We want to spill %1.sub31 as its use is the furthest one!
- // For that we'd want to build a sorted vector of the subreg uses first.
- // */
- // unsigned SubRegIdx;
- // unsigned i = 0;
- // do {
- // SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
- // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
- // SizeToSpill -= TRI->getRegClassWeight(RC).RegWeight;
- // i++;
- // } while (SizeToSpill != 0 && i < Idxs.size() - 1);
- // LaneBitmask SpillMask = TRI->getSubRegIndexLaneMask(SubRegIdx);
- // P.LaneMask &= ~SpillMask;
- // ToSpill.insert({P.VReg, SpillMask});
- // Active.insert(P);
- // }
} else {
CurRP -= RegSize;
if (!Spilled.contains(P))
ToSpill.insert(P);
}
}
- T2->stopTimer();
+ // T2->stopTimer();
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const VRegMaskPair VMP) {
@@ -700,8 +672,8 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
bool AMDGPUSSASpiller::isCoveredActive(VRegMaskPair VMP,
const RegisterSet Active) {
- printVRegMaskPair(VMP);
- dumpRegSet(Active);
+ // printVRegMaskPair(VMP);
+ // dumpRegSet(Active);
for (auto P : Active) {
if (P.VReg == VMP.VReg) {
return (P.LaneMask & VMP.LaneMask) == VMP.LaneMask;
@@ -716,12 +688,13 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
MFI = &MF.getFrameInfo();
TRI = ST->getRegisterInfo();
TII = ST->getInstrInfo();
-
+ T1->startTimer();
init(MF, false);
processFunction(MF);
init(MF, true);
processFunction(MF);
+ T1->stopTimer();
TG->print(llvm::errs());
return false;
}
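A small worked example of the merge rule above, with made-up blocks, registers and distances. Successor distances are merged with an edge weight, and edges that leave a loop use Weight = Infinity:

// Block B has two successors: S1 on the in-loop path (Weight = 0) and S2
// reached only by exiting the loop (Weight = Infinity).
//
//   S1 bottom set:  %7 (full mask) -> 4
//   S2 bottom set:  %7 (full mask) -> 1
//
// merge(S1, 0):         Curr = { %7 -> 4 }
// merge(S2, Infinity):  candidate distance 1 + Infinity is not smaller than 4,
//                       so Curr keeps %7 -> 4.
//
// If the out-of-loop use in S2 were the only use of %7, its merged distance
// would be ~Infinity and %7 would be a prime spill candidate inside the loop.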
>From 7b1c59550980ceed6b5a1716ed9396c0c86c532e Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 23 Jan 2025 16:13:30 -0600
Subject: [PATCH 16/46] SSA Spiller. Next Use Analysis: getSortedSubRegs method
added. Subregs spilling simplified
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 26 +++++++++++++
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 8 +++-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 37 ++++---------------
3 files changed, 40 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index a75f976ed82f4..7b6101b16405a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -143,6 +143,32 @@ void NextUseResult::getFromSortedRecords(
}
}
+SmallVector<VRegMaskPair>
+NextUseResult::getSortedSubregUses(const MachineBasicBlock::iterator I,
+ const VRegMaskPair VMP) {
+ SmallVector<VRegMaskPair> Result;
+ const MachineBasicBlock *MBB = I->getParent();
+ unsigned MBBNum = MBB->getNumber();
+ if (NextUseMap.contains(MBBNum) &&
+ NextUseMap[MBBNum].InstrDist.contains(&*I)) {
+ VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
+ VRegDistances::SortedRecords Dists =
+ NextUseMap[MBBNum].InstrDist[&*I][VMP.VReg];
+ LLVM_DEBUG(dbgs() << "Mask : [" << PrintLaneMask(VMP.LaneMask) << "]\n");
+ for (auto P : reverse(Dists)) {
+ LaneBitmask UseMask = P.first;
+ LLVM_DEBUG(dbgs() << "Used mask : [" << PrintLaneMask(UseMask)
+ << "]\n");
+ if ((UseMask & VMP.LaneMask) == UseMask) {
+ Result.push_back({VMP.VReg, UseMask});
+ }
+ }
+ }
+ }
+ return std::move(Result);
+}
+
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 78dcce6f3c4b7..38188281674c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -299,8 +299,12 @@ class NextUseResult {
void getFromSortedRecords(const VRegDistances::SortedRecords Dists,
LaneBitmask Mask, unsigned &D);
- bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- const VRegMaskPair VMP) {
+ SmallVector<VRegMaskPair>
+ getSortedSubregUses(const MachineBasicBlock::iterator I,
+ const VRegMaskPair VMP);
+
+ bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const VRegMaskPair VMP) {
if (!VMP.VReg.isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
return I == MBB.end() ? getNextUseDistance(MBB, VMP) == Infinity
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index de88d6979d3ab..0db9a9704dee7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -589,38 +589,17 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned RegSize = getSizeInRegs(P);
unsigned SizeToSpill = CurRP - Limit;
if (RegSize > SizeToSpill) {
- const TargetRegisterClass *SuperRC = TRI->getRegClassForReg(*MRI, P.VReg);
- DenseMap<unsigned, std::pair<unsigned, LaneBitmask>> Cands;
- SmallVector<unsigned> Sorted;
- for (auto &SubReg : MRI->reg_operands(P.VReg)) {
- if (!SubReg.isUse())
- continue;
- unsigned SubRegIdx = SubReg.getSubReg();
- LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubRegIdx);
- if ((P.LaneMask & Mask) != LaneBitmask::getNone()) {
- VRegMaskPair X(P.VReg, Mask);
- unsigned D = I == MBB.end() ? NU.getNextUseDistance(MBB, X)
- : NU.getNextUseDistance(I, X);
- Cands[D] = {SubRegIdx, Mask};
- Sorted.push_back(D);
- }
- }
LaneBitmask ActiveMask = P.LaneMask;
- std::sort(Sorted.begin(), Sorted.end(), [](unsigned x, unsigned y) { return x > y;});
- for (auto i : Sorted) {
- unsigned SubIdx = Cands[i].first;
- LaneBitmask SubMask = Cands[i].second;
- VRegMaskPair Y(P.VReg, SubMask);
- // dbgs() << "[ " << printReg(Y.VReg, TRI, SubIdx, MRI) << " ] : " << i
- // << "\n";
- const TargetRegisterClass *RC =
- TRI->getSubRegisterClass(SuperRC, SubIdx);
- unsigned Size = TRI->getRegClassWeight(RC).RegWeight;
+
+ SmallVector<VRegMaskPair> Sorted = NU.getSortedSubregUses(I, P);
+
+ for (auto P : Sorted) {
+ unsigned Size = getSizeInRegs(P);
CurRP -= Size;
- if (!Spilled.contains(Y))
- ToSpill.insert(Y);
- ActiveMask &= (~SubMask);
+ if (!Spilled.contains(P))
+ ToSpill.insert(P);
+ ActiveMask &= (~P.LaneMask);
if (CurRP == Limit)
break;
}
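A worked example of the partial-eviction arithmetic limit() now performs with getSortedSubregUses (numbers are made up; which sub-registers are picked follows the order the analysis returns, this only illustrates the bookkeeping):

// Limit = 24 registers, CurRP = 26  ->  SizeToSpill = 2
// Victim P = %4:vreg_1024 (RegSize = 32, LaneMask = all lanes)
//
// RegSize > SizeToSpill, so only part of %4 is evicted:
//   spill one 32-bit sub-register   -> CurRP = 25
//   spill a second one              -> CurRP = 24 == Limit, stop
//
// ActiveMask = all lanes minus the two spilled sub-registers; %4 is re-inserted
// into Active with that reduced mask instead of being dropped entirely.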
>From 9ad57d98c293fc64123331bacdf52e969469e934 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 31 Jan 2025 16:43:12 -0600
Subject: [PATCH 17/46] SSA Spiller. Multiple bugfixes. PHIs processing fixed.
Spilled VREgs excluded in LH init. WIP 31.01.25
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 9 ++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 116 ++++++++++++++----
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 2 +-
3 files changed, 103 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 7b6101b16405a..2d88ec46af030 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -110,6 +110,15 @@ void NextUseResult::analyze(const MachineFunction &MF) {
UsedInBlock[MBB->getNumber()].insert(P);
} else if (MO.isDef()) {
Curr.clear(P);
+ // if (!(MI.isPHI() && LI->isLoopHeader(MI.getParent())))
+ // FIXME: we add PHI-defined regs to the LiveIn set of the loop
+ // header block. Then we compute the Take set as UsedInBlock
+ // INTERSECT LiveIn. If we remove them here we will not have them in
+ // the Active set for the loop header. We should either not add
+ // PHI defines to LiveIn, assuming that the room for them will
+ // be created by "limit" as for any usual instruction, or
+ // keep removing PHI defines from the UsedInBlock set.
+ UsedInBlock[MBB->getNumber()].remove(P);
}
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 0db9a9704dee7..00ea6706403a2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -279,7 +279,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (MachineBasicBlock::iterator I : MBB) {
RegisterSet Reloads;
// T4->startTimer();
- for (auto U : I->uses()) {
+ for (auto &U : I->uses()) {
if (!U.isReg())
continue;
if (U.getReg().isPhysical())
@@ -288,13 +288,25 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (!takeReg(VReg))
continue;
- // if (U.getSubReg() != AMDGPU::NoSubRegister) {
- // dbgs() << U << "\n";
- // }
-
+
VRegMaskPair VMP(U, *TRI);
-
- if (Active.insert(VMP)) {
+
+ // We don't need to make room for the PHI uses as their operands must
+ // already be present in the corresponding predecessor Active set! Just
+ // make sure that is the case.
+ if (I->isPHI()) {
+ auto OpNo = U.getOperandNo();
+ auto B = I->getOperand(++OpNo);
+ assert(B.isMBB());
+ MachineBasicBlock *ValueSrc = B.getMBB();
+ if (MDT.properlyDominates(ValueSrc, &MBB)) {
+ assert(getBlockInfo(*ValueSrc).ActiveSet.contains(VMP) &&
+ "PHI node input value is not live ougt predecessor!");
+ }
+ continue;
+ }
+
+ if (!isCoveredActive(VMP, Active)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
@@ -305,6 +317,13 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
}
}
}
+
+ // if (I->isPHI())
+ // // We don't need to make room for the PHI-defined values as they will be
+ // // lowered to the copies at the end of the corresponding predecessors and
+ // // occupy the same register as the corresponding PHI input value.
+ // continue;
+
RegisterSet Defs;
for (auto D : I->defs()) {
if (D.getReg().isVirtual() && takeReg(D.getReg()))
@@ -317,7 +336,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
}
// T4->stopTimer();
- // dumpRegSet(Active);
+ dumpRegSet(Active);
RegisterSet ToSpill;
limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
@@ -325,16 +344,16 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
// T4->startTimer();
- // dumpRegSet(Active);
+ dumpRegSet(Active);
for (auto R : ToSpill) {
spillBefore(MBB, I, R);
Spilled.insert(R);
+ // FIXME: We'd want to update LIS if we could!
}
- // FIXME: limit with Defs is assumed to create room for the registers being
- // defined by I. Calling with std::next(I) makes spills inserted AFTER I!!!
Active.insert(Defs.begin(), Defs.end());
// Add reloads for VRegs in Reloads before I
+ dumpRegSet(Reloads);
for (auto R : Reloads)
reloadBefore(MBB, I, R);
// T4->stopTimer();
@@ -345,10 +364,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (NU.isDead(MBB, MBB.end(), R))
Deads.insert(R);
}
- // dumpRegSet(Deads);
- // dumpRegSet(Active);
+ dumpRegSet(Deads);
+ dumpRegSet(Active);
Active.set_subtract(Deads);
- // dumpRegSet(Active);
+ dumpRegSet(Active);
}
void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
@@ -363,9 +382,22 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
if (predecessors(&MBB).empty())
return;
+ LLVM_DEBUG(dbgs() << "\nconnectToPredecessors block " << MBB.getName());
auto &Entry = RegisterMap[MBB.getNumber()];
SmallVector<MachineBasicBlock *> Preds(predecessors(&MBB));
+ RegisterSet PHIOps;
+ for (auto &PHI : MBB.phis()) {
+ for (auto &PU : PHI.uses()) {
+ if (PU.isReg()) {
+ if (takeReg(PU.getReg())) {
+ VRegMaskPair P(PU, *TRI);
+ PHIOps.insert(P);
+ }
+ }
+ }
+ }
+
// in RPOT loop latches have not been processed yet
// their Active and Spill sets are not yet known
// Exclude from processing and postpone.
@@ -390,7 +422,16 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
set_intersect(Entry.SpillSet, Entry.ActiveSet);
for (auto Pred : Preds) {
auto PE = getBlockInfo(*Pred);
- RegisterSet ReloadInPred = set_difference(Entry.ActiveSet, PE.ActiveSet);
+ LLVM_DEBUG(dbgs() << "Curr block " << MBB.getName() << "Active Set:\n";
+ dumpRegSet(Entry.ActiveSet);
+ dbgs() << "\nPred " << Pred->getName() << "ActiveSet:\n";
+ dumpRegSet(PE.ActiveSet));
+ RegisterSet Tmp = set_difference(Entry.ActiveSet, PE.ActiveSet);
+ dumpRegSet(Tmp);
+ // Pred LiveOuts which are current block PHI operands don't need to be
+ // active across both edges.
+ RegisterSet ReloadInPred = set_difference(Tmp, PHIOps);
+ dumpRegSet(ReloadInPred);
if (!ReloadInPred.empty()) {
// We're about to insert N reloads at the end of the predecessor block.
// Make sure we have enough registers for N definitions or spill to make
@@ -424,12 +465,19 @@ void AMDGPUSSASpiller::initActiveSetUsualBlock(MachineBasicBlock &MBB) {
if (predecessors(&MBB).empty())
return;
+ LLVM_DEBUG(dbgs() << "Init Active Set " << MBB.getName() << "\n");
auto Pred = MBB.pred_begin();
RegisterSet Take = getBlockInfo(**Pred).ActiveSet;
RegisterSet Cand = getBlockInfo(**Pred).ActiveSet;
+ LLVM_DEBUG(dbgs() << "Pred's " << (*Pred)->getNumber() << " ActiveSet :";
+ dumpRegSet(Take));
+
for (Pred = std::next(Pred); Pred != MBB.pred_end(); ++Pred) {
+ LLVM_DEBUG(auto PredsActive = getBlockInfo(**Pred).ActiveSet;
+ dbgs() << "Pred's " << (*Pred)->getNumber() << " ActiveSet :";
+ dumpRegSet(PredsActive));
set_intersect(Take, getBlockInfo(**Pred).ActiveSet);
Cand.set_union(getBlockInfo(**Pred).ActiveSet);
}
@@ -438,11 +486,16 @@ void AMDGPUSSASpiller::initActiveSetUsualBlock(MachineBasicBlock &MBB) {
if (Take.empty() && Cand.empty())
return;
+ LLVM_DEBUG(dbgs()<< "Take : "; dumpRegSet(Take));
+ LLVM_DEBUG(dbgs()<< "Cand : "; dumpRegSet(Cand));
+
unsigned TakeSize = fillActiveSet(MBB, Take);
if (TakeSize < NumAvailableRegs) {
unsigned FullSize = fillActiveSet(MBB, Cand);
assert(FullSize <= NumAvailableRegs);
}
+ LLVM_DEBUG(dbgs() << MBB.getName() << "Exit ActiveSet: ";
+ dumpRegSet(getBlockInfo(MBB).ActiveSet));
}
void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
@@ -453,31 +506,46 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
Register VReg = Register::index2VirtReg(i);
if (!LIS.hasInterval(VReg))
continue;
+
if (takeReg(VReg) && LIS.isLiveInToMBB(LIS.getInterval(VReg), &MBB)) {
LiveIn.insert({VReg, LaneBitmask::getAll()});
}
}
- for (auto &PHI : MBB.phis()) {
- for (auto U : PHI.uses()) {
- if (U.isReg() && takeReg(U.getReg())) {
- // assume PHIs operands are always virtual regs
- LiveIn.insert(VRegMaskPair(U, *TRI));
- }
- }
+ LLVM_DEBUG(dbgs() << "\nBlock " << MBB.getName() << " Live Ins: ";
+ dumpRegSet(LiveIn));
+
+ // FIXME: We are forced to collect the pred's spills here, so maybe we need to move
+ // pred's spill processing from connectToPredecessors to init? Or at least
+ // don't do it again in connectToPredecessors if it is already done here?
+ auto &Entry = RegisterMap[MBB.getNumber()];
+ auto &Spilled = Entry.SpillSet;
+ for (auto P : predecessors(&MBB)) {
+ Spilled.set_union(getBlockInfo(*P).SpillSet);
}
RegisterSet UsedInLoop;
MachineLoop *L = LI.getLoopFor(&MBB);
for (auto B : L->blocks()) {
RegisterSet Tmp(NU.usedInBlock(*B));
+ LLVM_DEBUG(dbgs() << "\nBlock " << B->getName()
+ << " is part of the loop. Used in block: ";
+ dumpRegSet(Tmp));
UsedInLoop.set_union(Tmp);
}
+ LLVM_DEBUG(dbgs() << "Total used in loop: "; dumpRegSet(UsedInLoop));
+
// Take - LiveIns used in Loop. Cand - LiveThrough
RegisterSet Take = set_intersection(LiveIn, UsedInLoop);
RegisterSet Cand = set_difference(LiveIn, UsedInLoop);
+ // We don't want to reload those not used in the loop which have already been
+ // spilled.
+ Cand.set_subtract(Spilled);
+ LLVM_DEBUG(dbgs() << "\nBlock " << MBB.getName() << "sets\n";
+ dbgs() << "Take : "; dumpRegSet(Take); dbgs() << "Cand : ";
+ dumpRegSet(Cand));
unsigned TakeSize = fillActiveSet(MBB, Take);
if (TakeSize < NumAvailableRegs) {
@@ -492,6 +560,8 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
unsigned FullSize = fillActiveSet(MBB, Cand, FreeSpace);
assert(FullSize <= NumAvailableRegs);
}
+ LLVM_DEBUG(dbgs() << "\nFinal Loop header Active :";
+ dumpRegSet(getBlockInfo(MBB).ActiveSet));
}
const TargetRegisterClass *
@@ -641,7 +711,7 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
sortRegSetAt(MBB, MBB.begin(), S);
for (auto VMP : S) {
unsigned RSize = getSizeInRegs(VMP);
- if (Size + RSize < Limit) {
+ if (Size + RSize <= Limit) {
Active.insert(VMP);
Size += RSize;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 87d943668b7fe..5b9e2111c4d7e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1927,7 +1927,7 @@ void SIInstrInfo::loadRegFromStackSlot(
unsigned Opcode = getVectorRegSpillRestoreOpcode(VReg ? VReg : DestReg, RC,
SpillSize, RI, *MFI);
BuildMI(MBB, MI, DL, get(Opcode))
- .addReg(DestReg, 0, SubRegIdx)
+ .addReg(DestReg, RegState::Define, SubRegIdx)
.addFrameIndex(FrameIndex) // vaddr
.addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
.addImm(0) // offset
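Reviewer note on the loop-header handling added in the patch above: the Take/Cand split is plain set algebra over the loop live-ins. Below is a minimal standalone sketch of that computation, assuming std::set and made-up register IDs in place of the pass's RegisterSet/VRegMaskPair types; it is an illustration, not code from the patch.

#include <algorithm>
#include <cstdio>
#include <iterator>
#include <set>

// Illustrative only: registers are plain unsigned IDs and the input sets are
// made up; the real pass works on VRegMaskPair and SetVector.
int main() {
  std::set<unsigned> LiveIn = {1, 2, 3, 4};  // live into the loop header
  std::set<unsigned> UsedInLoop = {2, 3, 5}; // used somewhere inside the loop
  std::set<unsigned> Spilled = {4};          // already spilled in a predecessor

  std::set<unsigned> Take, Cand;
  // Take: live-ins the loop actually reads -- keep them in registers first.
  std::set_intersection(LiveIn.begin(), LiveIn.end(), UsedInLoop.begin(),
                        UsedInLoop.end(), std::inserter(Take, Take.end()));
  // Cand: live-through values -- keep them only if there is room left.
  std::set_difference(LiveIn.begin(), LiveIn.end(), UsedInLoop.begin(),
                      UsedInLoop.end(), std::inserter(Cand, Cand.end()));
  // Do not re-activate live-through values that were already spilled.
  for (unsigned R : Spilled)
    Cand.erase(R);

  for (unsigned R : Take)
    std::printf("Take: %%%u\n", R);
  for (unsigned R : Cand)
    std::printf("Cand: %%%u\n", R);
  return 0;
}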
>From 334e971ca9ad07231879c04693f2d78c29d2d0a7 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 6 Feb 2025 08:11:44 -0600
Subject: [PATCH 18/46] SSA Spiller. UsedInBlock fixed
---
llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 12 ++++++++++++
llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 6 ++++--
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 3 ++-
3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 2d88ec46af030..03e5486bf1670 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -133,6 +133,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
Changed |= Changed4MBB;
}
}
+ dumpUsedInBlock();
T1->stopTimer();
TG->print(llvm::errs());
}
@@ -178,6 +179,17 @@ NextUseResult::getSortedSubregUses(const MachineBasicBlock::iterator I,
return std::move(Result);
}
+void NextUseResult::dumpUsedInBlock() {
+ LLVM_DEBUG(for (auto P
+ : UsedInBlock) {
+ dbgs() << "MBB_" << P.first << ":\n";
+ for (auto VMP : P.second) {
+ dbgs() << "[ " << printReg(VMP.VReg) << " : <"
+ << PrintLaneMask(VMP.LaneMask) << "> ]\n";
+ }
+ });
+}
+
unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
const VRegMaskPair VMP) {
unsigned Dist = Infinity;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 38188281674c5..c31419d25c583 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -311,9 +311,11 @@ class NextUseResult {
: getNextUseDistance(I, VMP) == Infinity;
}
- SetVector<Register> usedInBlock(MachineBasicBlock &MBB) {
- return std::move(UsedInBlock[MBB.getNumber()]);
+ SetVector<VRegMaskPair>& usedInBlock(MachineBasicBlock &MBB) {
+ return UsedInBlock[MBB.getNumber()];
}
+
+ void dumpUsedInBlock();
};
class AMDGPUNextUseAnalysis : public AnalysisInfoMixin<AMDGPUNextUseAnalysis> {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 00ea6706403a2..200a88bf35671 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -527,7 +527,8 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
RegisterSet UsedInLoop;
MachineLoop *L = LI.getLoopFor(&MBB);
for (auto B : L->blocks()) {
- RegisterSet Tmp(NU.usedInBlock(*B));
+ RegisterSet Tmp = NU.usedInBlock(*B);
+ Tmp.remove_if([&](VRegMaskPair P) { return !takeReg(P.VReg); });
LLVM_DEBUG(dbgs() << "\nBlock " << B->getName()
<< " is part of the loop. Used in block: ";
dumpRegSet(Tmp));
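Context for the UsedInBlock fix above: the previous accessor returned std::move(UsedInBlock[MBB.getNumber()]), so the first query drained the stored set and later queries for the same block saw nothing. A small standalone repro of that pitfall, with std::map/std::set standing in for the analysis containers (the names here are illustrative, not the pass's API):

#include <cstdio>
#include <map>
#include <set>
#include <utility>

std::map<int, std::set<unsigned>> UsedInBlock;

// Buggy accessor: moves the stored set out, typically leaving the map entry
// empty for every later query of the same block.
std::set<unsigned> usedInBlockMove(int BB) { return std::move(UsedInBlock[BB]); }

// Fixed accessor: hands out a reference, the map keeps its contents.
std::set<unsigned> &usedInBlockRef(int BB) { return UsedInBlock[BB]; }

int main() {
  UsedInBlock[0] = {1, 2, 3};
  auto First = usedInBlockMove(0);
  std::printf("after move: returned %zu, left in map %zu\n", First.size(),
              UsedInBlock[0].size());

  UsedInBlock[0] = {1, 2, 3};
  auto &Again = usedInBlockRef(0);
  std::printf("by reference: returned %zu, left in map %zu\n", Again.size(),
              UsedInBlock[0].size());
  return 0;
}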
>From be4853eda39986d335edeb1158fb5a1810a8eda7 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Fri, 7 Feb 2025 20:38:28 +0100
Subject: [PATCH 19/46] SSA Spiller. LIS for the spill/reload. PHIs processing
redesigned. SSA Updater in reload.
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 120 ++++++++-----
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 10 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 159 +++++++++++++++---
3 files changed, 214 insertions(+), 75 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 03e5486bf1670..969049d939d61 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -34,14 +34,10 @@ void NextUseResult::init(const MachineFunction &MF) {
T2 = new Timer("Next Use Analysis", "Time spent in computeNextUseDistance()",
*TG);
for (auto L : LI->getLoopsInPreorder()) {
- SmallVector<MachineBasicBlock *> Exiting;
- L->getExitingBlocks(Exiting);
- for (auto B : Exiting) {
- for (auto S : successors(B)) {
- if (!L->contains(S)) {
- EdgeWeigths[B->getNumber()] = S->getNumber();
- }
- }
+ SmallVector<std::pair<MachineBasicBlock *, MachineBasicBlock *>> Exiting;
+ L->getExitEdges(Exiting);
+ for (auto P : Exiting) {
+ LoopExits[P.first->getNumber()] = P.second->getNumber();
}
}
}
@@ -54,7 +50,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
DenseMap<unsigned, VRegDistances> UpwardNextUses;
T1->startTimer();
bool Changed = true;
- while(Changed) {
+ while (Changed) {
Changed = false;
for (auto MBB : post_order(&MF)) {
unsigned MBBNum = MBB->getNumber();
@@ -63,38 +59,80 @@ void NextUseResult::analyze(const MachineFunction &MF) {
Prev = UpwardNextUses[MBBNum];
}
- LLVM_DEBUG(dbgs() << "\nMerging successors for " << MBB->getName()
- << "\n";);
+ LLVM_DEBUG(dbgs() << "\nMerging successors for " << "MBB_"
+ << MBB->getNumber() << "." << MBB->getName() << "\n";);
for (auto Succ : successors(MBB)) {
unsigned SuccNum = Succ->getNumber();
- LLVM_DEBUG(dbgs() << "Merging " << Succ->getName() << "\n");
+ if (!UpwardNextUses.contains(SuccNum))
+ continue;
- if (UpwardNextUses.contains(SuccNum)) {
- VRegDistances SuccDist = UpwardNextUses[SuccNum];
- // Check if the edge from MBB to Succ goes out of the Loop
- unsigned Weight = 0;
- if (EdgeWeigths.contains(MBB->getNumber())) {
- int SuccNum = EdgeWeigths[MBB->getNumber()];
- if (Succ->getNumber() == SuccNum)
- Weight = Infinity;
+ VRegDistances SuccDist = UpwardNextUses[SuccNum];
+ LLVM_DEBUG(dbgs() << "\nMerging " << "MBB_" << Succ->getNumber() << "."
+ << Succ->getName() << "\n");
+
+ // Check if the edge from MBB to Succ goes out of the Loop
+ unsigned Weight = 0;
+ if (LoopExits.contains(MBB->getNumber())) {
+ int SuccNum = LoopExits[MBB->getNumber()];
+ if (Succ->getNumber() == SuccNum)
+ Weight = Infinity;
+ }
+
+ if (LI->getLoopDepth(MBB) < LI->getLoopDepth(Succ)) {
+ // MBB->Succ is entering the Succ's loop
+ // Clear out the Loop-Exiting weights.
+ for (auto &P : SuccDist) {
+ auto &Dists = P.second;
+ for (auto R : Dists) {
+ if (R.second >= Infinity) {
+ std::pair<LaneBitmask, unsigned> New = R;
+ New.second -= Infinity;
+ Dists.erase(R);
+ Dists.insert(New);
+ }
+ }
}
- LLVM_DEBUG(
- dbgs() << "Curr: ";
- printVregDistances(Curr);
- dbgs() << "Succ: ";
- printVregDistances(SuccDist));
- Curr.merge(SuccDist, Weight);
- LLVM_DEBUG(dbgs() << "Curr after merge: ";
- printVregDistances(Curr));
+ }
+ LLVM_DEBUG(dbgs() << "\nCurr: "; printVregDistances(Curr);
+ dbgs() << "\nSucc: "; printVregDistances(SuccDist));
+
+ Curr.merge(SuccDist, Weight);
+ LLVM_DEBUG(dbgs() << "\nCurr after merge: "; printVregDistances(Curr));
+ // Now take care of the PHIs operands in the Succ
+ for (auto &PHI : Succ->phis()) {
+ for (auto &U : PHI.uses()) {
+ if (U.isReg()) {
+ auto OpNo = U.getOperandNo();
+ auto B = PHI.getOperand(++OpNo);
+ assert(B.isMBB());
+ MachineBasicBlock *ValueSrc = B.getMBB();
+ if (ValueSrc->getNumber() == MBB->getNumber()) {
+ // We assume that all the PHIs have zero distance from the
+ // succ end!
+ Curr.insert({U.getReg(), LaneBitmask::getAll()}, 0);
+ }
+ }
+ }
+ for (auto &U : PHI.defs())
+ Curr.clear({U.getReg(), LaneBitmask::getAll()});
}
}
+ LLVM_DEBUG(dbgs() << "\nCurr after succsessors processing: ";
+ printVregDistances(Curr));
NextUseMap[MBBNum].Bottom = Curr;
for (auto &MI : make_range(MBB->rbegin(), MBB->rend())) {
-
+
+ if (MI.isPHI())
+ // We'll take care of PHIs when merging this block into its
+ // predecessor.
+ continue;
+
+ // TODO: Compute distances in some modifiable container and copy to
+ // the std::set once when ready in one loop!
for (auto &P : Curr) {
VRegDistances::SortedRecords Tmp;
for (auto D : P.second)
@@ -105,38 +143,32 @@ void NextUseResult::analyze(const MachineFunction &MF) {
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.getReg().isVirtual()) {
VRegMaskPair P(MO, *TRI);
- if(MO.isUse()) {
+ if (MO.isUse()) {
Curr.insert(P, 0);
UsedInBlock[MBB->getNumber()].insert(P);
} else if (MO.isDef()) {
Curr.clear(P);
- // if (!(MI.isPHI() && LI->isLoopHeader(MI.getParent())))
- // FIXME: we add PHI-defined Regs to the LiveIn for the loop
- // header block. Then we compute the Take set as UsedInBlock
- // INTERSECT LiveIn. If we remove it here we will not have it in
- // the Active set for the loop header. We either should not add
- // PHI defines to LiveIn, assuming that the room for them will
- // be created by the "limit" as for any usual instruction, or
- // keep removing PHI defines from the UsedInBlock set.
- UsedInBlock[MBB->getNumber()].remove(P);
+ UsedInBlock[MBB->getNumber()].remove(P);
}
}
}
NextUseMap[MBBNum].InstrDist[&MI] = Curr;
- // printVregDistances(Curr);
}
+ LLVM_DEBUG(dbgs() << "\nFinal distances for MBB_" << MBB->getNumber()
+ << "." << MBB->getName() << "\n";
+ printVregDistances(Curr));
UpwardNextUses[MBBNum] = std::move(Curr);
bool Changed4MBB = (Prev != UpwardNextUses[MBBNum]);
Changed |= Changed4MBB;
}
+ }
+ dumpUsedInBlock();
+ T1->stopTimer();
+ TG->print(llvm::errs());
}
- dumpUsedInBlock();
- T1->stopTimer();
- TG->print(llvm::errs());
-}
void NextUseResult::getFromSortedRecords(
const VRegDistances::SortedRecords Dists, LaneBitmask Mask, unsigned &D) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index c31419d25c583..5cc0d8faf154b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -112,9 +112,7 @@ class NextUseResult {
return {false, SortedRecords()};
}
- SortedRecords operator[] (unsigned Key) {
- return NextUseMap[Key];
- }
+ SortedRecords &operator[](unsigned Key) { return NextUseMap[Key]; }
SmallVector<unsigned> keys() {
SmallVector<unsigned> Keys;
@@ -246,8 +244,8 @@ class NextUseResult {
private:
- DenseMap<unsigned, SetVector<Register>> UsedInBlock;
- DenseMap<int, int> EdgeWeigths;
+ DenseMap<unsigned, SetVector<VRegMaskPair>> UsedInBlock;
+ DenseMap<int, int> LoopExits;
const uint16_t Infinity = std::numeric_limits<unsigned short>::max();
void init(const MachineFunction &MF);
void analyze(const MachineFunction &MF);
@@ -278,7 +276,7 @@ class NextUseResult {
void clear() {
NextUseMap.clear();
- EdgeWeigths.clear();
+ LoopExits.clear();
}
public:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 200a88bf35671..6dc9bd8edc345 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -4,6 +4,7 @@
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
@@ -24,11 +25,11 @@ namespace {
class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
- const LiveIntervals &LIS;
+ LiveIntervals &LIS;
MachineLoopInfo &LI;
MachineDominatorTree &MDT;
AMDGPUNextUseAnalysis::Result &NU;
- const MachineRegisterInfo *MRI;
+ MachineRegisterInfo *MRI;
const SIRegisterInfo *TRI;
const SIInstrInfo *TII;
const GCNSubtarget *ST;
@@ -187,7 +188,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
public:
AMDGPUSSASpiller() = default;
- AMDGPUSSASpiller(const LiveIntervals &LIS, MachineLoopInfo &LI,
+ AMDGPUSSASpiller(LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
: LIS(LIS), LI(LI), MDT(MDT), NU(NU), NumSpillSlots(0) //,
// Virt2StackSlotMap(NO_STACK_SLOT) {
@@ -293,7 +294,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
// We don't need to make room for the PHI uses as these operands must
// already be present in the corresponding predecessor Active set! Just make
- // sure it is.
+ // sure they really are.
if (I->isPHI()) {
auto OpNo = U.getOperandNo();
auto B = I->getOperand(++OpNo);
@@ -312,17 +313,25 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
// predecessors and should already contain all spilled before!
// SPECIAL CASE: undef
if (!U.isUndef()) {
- Spilled.insert(VMP);
Reloads.insert(VMP);
}
}
}
- // if (I->isPHI())
- // // We don't need to make room for the PHI-defined values as they will be
- // // lowered to the copies at the end of the corresponding predecessors and
- // // occupy the same register as the corresponding PHI input value.
- // continue;
+ if (I->isPHI()) {
+ // We don't need to make room for the PHI-defined values as they will be
+ // lowered to the copies at the end of the corresponding predecessors and
+ // occupy the same register as the corresponding PHI input value.
+ // Nevertheless, we must add them to the Active to indicate their values
+ // are available.
+ for (auto D : I->defs()) {
+ Register R = D.getReg();
+ if (takeReg(R)) {
+ Active.insert({R, LaneBitmask::getAll()});
+ }
+ }
+ continue;
+ }
RegisterSet Defs;
for (auto D : I->defs()) {
@@ -336,7 +345,15 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
}
// T4->stopTimer();
- dumpRegSet(Active);
+ LLVM_DEBUG(dbgs() << "\nCurrent Active set is:\n"; dumpRegSet(Active));
+ LLVM_DEBUG(dbgs() << "\nVRegs used but spilled before, we're to reload:\n";
+ dumpRegSet(Reloads));
+
+ Active.insert(Reloads.begin(), Reloads.end());
+ Spilled.insert(Reloads.begin(), Reloads.end());
+
+ LLVM_DEBUG(dbgs() << "\nActive set with uses reloaded:\n";
+ dumpRegSet(Active));
RegisterSet ToSpill;
limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
@@ -344,8 +361,6 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
// T4->startTimer();
- dumpRegSet(Active);
-
for (auto R : ToSpill) {
spillBefore(MBB, I, R);
Spilled.insert(R);
@@ -358,16 +373,51 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
reloadBefore(MBB, I, R);
// T4->stopTimer();
}
- // Now, clear dead registers.
+ // Now, clear dead registers. We generally take care of trimming deads at the
+ // entry to "limit". The dangling deads may appear when operand is SGPR but
+ // result is VGPR, so we don't enter to the limit second time to make room for
+ // the result. If this is the last use of the SGPR operand it is effectively
+ // dead.
+ // %X:sreg_32 = ...
+ // ***
+ // %Y:vgpr_32 = COPY %X:sreg_32 <-- %X is dead but we won't call "limit" for
+ // %Y in this pass.
RegisterSet Deads;
for (auto R : Active) {
if (NU.isDead(MBB, MBB.end(), R))
Deads.insert(R);
}
- dumpRegSet(Deads);
- dumpRegSet(Active);
- Active.set_subtract(Deads);
- dumpRegSet(Active);
+
+ if (!Deads.empty()) {
+ LLVM_DEBUG(dbgs() << "\nThese VRegs are DEAD at the end of MBB_"
+ << MBB.getNumber() << "." << MBB.getName() << "\n";
+ dumpRegSet(Deads));
+ Active.set_subtract(Deads);
+ LLVM_DEBUG(dbgs() << "\nActive set after DEAD VRegs removed:\n";
+ dumpRegSet(Active));
+ }
+
+ // Take care of the LiveOuts which are Succ's PHI operands.
+ for (auto Succ : successors(&MBB)) {
+ for (auto &PHI : Succ->phis()) {
+ for (auto &U : PHI.uses()) {
+ if (U.isReg() && takeReg(U.getReg())) {
+ auto OpNo = U.getOperandNo();
+ auto B = PHI.getOperand(++OpNo);
+ assert(B.isMBB());
+ MachineBasicBlock *ValueSrc = B.getMBB();
+ if (ValueSrc->getNumber() == MBB.getNumber()) {
+ VRegMaskPair VMP(U, *TRI);
+ if (!isCoveredActive(VMP, Active)) {
+ reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
+ }
+ }
+ }
+ }
+ }
+ }
+ LLVM_DEBUG(dbgs() << "\nActive set after Succs PHI operands processing:\n";
+ dumpRegSet(Active));
}
void AMDGPUSSASpiller::processLoop(MachineLoop *L) {
@@ -422,9 +472,11 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
set_intersect(Entry.SpillSet, Entry.ActiveSet);
for (auto Pred : Preds) {
auto PE = getBlockInfo(*Pred);
- LLVM_DEBUG(dbgs() << "Curr block " << MBB.getName() << "Active Set:\n";
+ LLVM_DEBUG(dbgs() << "\nCurr block [ MBB_" << MBB.getNumber() << "."
+ << MBB.getName() << " ] Active Set:\n";
dumpRegSet(Entry.ActiveSet);
- dbgs() << "\nPred " << Pred->getName() << "ActiveSet:\n";
+ dbgs() << "\nPred [ MBB_" << Pred->getNumber() << "."
+ << Pred->getName() << " ] ActiveSet:\n";
dumpRegSet(PE.ActiveSet));
RegisterSet Tmp = set_difference(Entry.ActiveSet, PE.ActiveSet);
dumpRegSet(Tmp);
@@ -432,6 +484,8 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// active across both edges.
RegisterSet ReloadInPred = set_difference(Tmp, PHIOps);
dumpRegSet(ReloadInPred);
+ set_intersect(ReloadInPred, PE.SpillSet);
+ dumpRegSet(ReloadInPred);
if (!ReloadInPred.empty()) {
// We're about to insert N reloads at the end of the predecessor block.
// Make sure we have enough registers for N definitions or spill to make
@@ -597,8 +651,39 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
unsigned SubRegIdx = 0;
const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
int FI = getStackSlot(VMP);
- TII->loadRegFromStackSlot(MBB, InsertBefore, VMP.VReg, FI,
- RC, TRI, VMP.VReg, SubRegIdx);
+ Register NewVReg = MRI->createVirtualRegister(RC);
+ TII->loadRegFromStackSlot(MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg,
+ SubRegIdx);
+ // FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
+ MachineInstr &ReloadMI = *(--InsertBefore);
+ LIS.InsertMachineInstrInMaps(ReloadMI);
+ MachineSSAUpdater Updater(*MBB.getParent());
+ Updater.Initialize(NewVReg);
+ Updater.AddAvailableValue(ReloadMI.getParent(), NewVReg);
+ // FIXME: we'd better pass the exact UseMI here to avoid scanning all the
+ // users. isCoveredActive takes care of possible uses with the mask narrower
+ // than this, reloaded here.
+ SmallVector<MachineOperand*> ToUpdate;
+ for (auto &U : MRI->use_nodbg_operands(VMP.VReg)) {
+ MachineInstr *UseMI = U.getParent();
+ if (MDT.dominates(&ReloadMI, UseMI)) {
+ ToUpdate.push_back(&U);
+ } else if (UseMI->isPHI()) {
+ unsigned OpNo = U.getOperandNo();
+ MachineOperand MBBOp = UseMI->getOperand(++OpNo);
+ assert(MBBOp.isMBB() && "Not PHI instruction or malformed PHI!");
+ MachineBasicBlock *SourceMBB = MBBOp.getMBB();
+ if (SourceMBB == &MBB)
+ ToUpdate.push_back(&U);
+ }
+ }
+ for (auto U : ToUpdate) {
+ // FIXME: Do we always want "AtEndOfBlock"?
+ U->setReg(Updater.GetValueAtEndOfBlock(&MBB));
+ }
+ LIS.createAndComputeVirtRegInterval(NewVReg);
+ auto &Entry = getBlockInfo(MBB);
+ Entry.ActiveSet.insert({NewVReg, LaneBitmask::getAll()});
}
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
@@ -618,6 +703,22 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
int FI = assignVirt2StackSlot(VMP);
TII->storeRegToStackSlot(MBB, InsertBefore, VMP.VReg, true, FI, RC, TRI,
VMP.VReg, SubRegIdx);
+ // FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
+ MachineInstr &Spill = *(--InsertBefore);
+ LIS.InsertMachineInstrInMaps(Spill);
+
+ if (LIS.hasInterval(VMP.VReg)) {
+ LiveInterval &LI = LIS.getInterval(VMP.VReg);
+ SlotIndex KillIdx = LIS.getInstructionIndex(Spill);
+ auto LR = LI.find(KillIdx);
+ if (LR != LI.end()) {
+ SlotIndex Start = LR->start;
+ SlotIndex End = LR->end;
+ if (Start < KillIdx) {
+ LI.removeSegment(KillIdx, End);
+ }
+ }
+ }
}
unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
@@ -643,8 +744,13 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
RegisterSet &ToSpill) {
// T2->startTimer();
+ LLVM_DEBUG(dbgs() << "\nIn \"limit\" with Limit = " << Limit << "\n");
+
Active.remove_if([&](VRegMaskPair P) { return NU.isDead(MBB, I, P); });
+ LLVM_DEBUG(dbgs() << "\nActive set after DEAD VRegs removed:\n";
+ dumpRegSet(Active));
+
unsigned CurRP = getSizeInRegs(Active);
if (CurRP <= Limit) {
// T2->stopTimer();
@@ -652,8 +758,8 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
}
sortRegSetAt(MBB, I, Active);
-
- dumpRegSet(Active);
+
+ LLVM_DEBUG(dbgs() << "\nActive set sorted at" << *I; dumpRegSet(Active));
while (CurRP > Limit) {
auto P = Active.pop_back_val();
@@ -687,6 +793,8 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
ToSpill.insert(P);
}
}
+ LLVM_DEBUG(dbgs() << "\nActive set after at the end of the \"limit\":\n";
+ dumpRegSet(Active));
// T2->stopTimer();
}
@@ -744,6 +852,7 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
init(MF, true);
processFunction(MF);
+ MF.viewCFG();
T1->stopTimer();
TG->print(llvm::errs());
return false;
@@ -792,7 +901,7 @@ class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
};
bool AMDGPUSSASpillerLegacy::runOnMachineFunction(MachineFunction &MF) {
- const LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
+ LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
MachineLoopInfo &LI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
MachineDominatorTree &MDT =
getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
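To make the next-use merging rules in the analysis rework above easier to follow: distances are merged from successors by taking the minimum, a loop-exit edge adds an Infinity bias so values only needed after the loop rank behind values used inside it, and the bias is stripped again when distances flow into a deeper loop. A tiny standalone model of just that arithmetic (plain std::map, made-up numbers; not the pass's VRegDistances type):

#include <cstdio>
#include <limits>
#include <map>

using Distances = std::map<unsigned, unsigned>; // vreg id -> next-use distance

constexpr unsigned Infinity = std::numeric_limits<unsigned short>::max();

// Merge a successor's distances into Curr, biasing by the edge weight
// (Weight == Infinity for a loop-exit edge, 0 otherwise) and keeping the
// minimum per vreg.
void merge(Distances &Curr, const Distances &Succ, unsigned Weight) {
  for (auto [Reg, Dist] : Succ) {
    unsigned Biased = Dist + Weight;
    auto It = Curr.find(Reg);
    if (It == Curr.end() || Biased < It->second)
      Curr[Reg] = Biased;
  }
}

// When distances are propagated into a deeper loop, strip the exit bias so
// the values compete on their real in-loop distances again.
void stripLoopExitBias(Distances &D) {
  for (auto &[Reg, Dist] : D)
    if (Dist >= Infinity)
      Dist -= Infinity;
}

int main() {
  Distances Curr;
  Distances InLoopSucc = {{1, 2}, {2, 7}}; // back-edge / in-loop path
  Distances ExitSucc = {{2, 1}, {3, 4}};   // loop-exit path
  merge(Curr, InLoopSucc, /*Weight=*/0);
  merge(Curr, ExitSucc, /*Weight=*/Infinity); // post-loop uses rank last
  std::puts("merged at the loop bottom:");
  for (auto [Reg, Dist] : Curr)
    std::printf("  %%%u -> %u\n", Reg, Dist);

  stripLoopExitBias(Curr);
  std::puts("after entering a deeper loop:");
  for (auto [Reg, Dist] : Curr)
    std::printf("  %%%u -> %u\n", Reg, Dist);
  return 0;
}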
>From f047c0e865c371a109f05be76b52cd556e67f6ef Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 13 Feb 2025 21:15:23 +0100
Subject: [PATCH 20/46] SSA Spiller: yet another way to populate the uses list
 to update after reload. Comment: Let's keep it in a separate commit for now
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 36 ++++++++++++---------
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 6dc9bd8edc345..cc17912e3bc07 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -40,12 +40,9 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned NumSpillSlots;
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
+ DenseMap<VRegMaskPair, MachineInstr *> SpillPoints;
- // TODO: HOW TO MAP VREG + LANEMASK TO SPILL SLOT ???
-
- // IF IT EVEN POSSIBLE TO SPILL REG.SUBREG ?
-
- // CREATE NEW PSEUDOS SI_SPILL_XXX_SAVE/RESTORE_WITH_SUBREG ???
+ LLVM_ATTRIBUTE_NOINLINE void dumpRegSet(SetVector<VRegMaskPair> VMPs);
unsigned createSpillSlot(const TargetRegisterClass *RC) {
unsigned Size = TRI->getSpillSize(*RC);
@@ -665,16 +662,24 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
// than this, reloaded here.
SmallVector<MachineOperand*> ToUpdate;
for (auto &U : MRI->use_nodbg_operands(VMP.VReg)) {
- MachineInstr *UseMI = U.getParent();
- if (MDT.dominates(&ReloadMI, UseMI)) {
- ToUpdate.push_back(&U);
- } else if (UseMI->isPHI()) {
- unsigned OpNo = U.getOperandNo();
- MachineOperand MBBOp = UseMI->getOperand(++OpNo);
- assert(MBBOp.isMBB() && "Not PHI instruction or malformed PHI!");
- MachineBasicBlock *SourceMBB = MBBOp.getMBB();
- if (SourceMBB == &MBB)
- ToUpdate.push_back(&U);
+ // MachineInstr *UseMI = U.getParent();
+ // if (MDT.dominates(&ReloadMI, UseMI)) {
+ // ToUpdate.push_back(&U);
+ // } else if (UseMI->isPHI()) {
+ // unsigned OpNo = U.getOperandNo();
+ // MachineOperand MBBOp = UseMI->getOperand(++OpNo);
+ // assert(MBBOp.isMBB() && "Not PHI instruction or malformed PHI!");
+ // MachineBasicBlock *SourceMBB = MBBOp.getMBB();
+ // if (SourceMBB == &MBB)
+ // ToUpdate.push_back(&U);
+ // }
+
+
+ if (SpillPoints.contains(VMP)) {
+ MachineInstr *UseMI = U.getParent();
+ MachineInstr *Spill = SpillPoints[VMP];
+ if (UseMI != Spill && MDT.dominates(Spill, UseMI))
+ ToUpdate.push_back(&U);
}
}
for (auto U : ToUpdate) {
@@ -719,6 +724,7 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
}
}
}
+ SpillPoints[VMP] = &Spill;
}
unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
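The SpillPoints change above narrows which uses get rewritten to the reloaded value: only uses of the spilled vreg that the recorded spill point dominates, excluding the spill itself. A toy standalone illustration of that filter, where a single straight-line block lets "dominates" degenerate to "has a smaller instruction index" (all names and indices below are made up):

#include <cstdio>
#include <vector>

// Toy model: instructions in one straight-line block, identified by index.
struct Use {
  unsigned Reg;       // original (pre-reload) vreg
  unsigned InstrIdx;  // position of the using instruction
};

int main() {
  unsigned SpilledReg = 7;
  unsigned SpillIdx = 3; // where the spill was recorded (SpillPoints[VMP])
  std::vector<Use> Uses = {{7, 1}, {7, 3}, {7, 5}, {7, 8}, {9, 6}};

  // Only uses of the spilled vreg that come after (are dominated by) the
  // spill point, and that are not the spill itself, are rewritten.
  for (const Use &U : Uses) {
    bool Rewrite = U.Reg == SpilledReg && U.InstrIdx != SpillIdx &&
                   SpillIdx < U.InstrIdx;
    std::printf("use of %%%u at %u: %s\n", U.Reg, U.InstrIdx,
                Rewrite ? "rewrite to reload" : "keep");
  }
  return 0;
}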
>From 0b28ab9df1bae760d257cbec0e70d918fd83f6be Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Fri, 14 Feb 2025 21:35:29 +0100
Subject: [PATCH 21/46] SSA Spiller. limit iterator fix. Minor tweaks. Ready
for demo
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 131 ++++++++++----------
1 file changed, 68 insertions(+), 63 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index cc17912e3bc07..5b2eed693bf92 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -145,9 +145,9 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
unsigned getLoopMaxRP(MachineLoop *L);
- void limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
- MachineBasicBlock::iterator I, unsigned Limit,
- RegisterSet &ToSpill);
+ // Returns number of spilled VRegs
+ unsigned limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
+ MachineBasicBlock::iterator I, unsigned Limit);
unsigned getSizeInRegs(const VRegMaskPair VMP);
unsigned getSizeInRegs(const RegisterSet VRegs);
@@ -175,6 +175,11 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
SmallVector<VRegMaskPair> Tmp(VRegs.takeVector());
sort(Tmp, SortByDist);
VRegs.insert(Tmp.begin(), Tmp.end());
+ LLVM_DEBUG(dbgs() << "\nActive set sorted at " << *I;
+ for (auto P : VRegs) {
+ printVRegMaskPair(P);
+ dbgs() << " : " << M[P] << "\n";
+ });
}
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
@@ -225,9 +230,9 @@ AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
dbgs() << "Vreg: [";
if (HasSubReg)
for (auto i : Idxs)
- dbgs() << printReg(P.VReg, TRI, i, MRI) << "]\n";
+ dbgs() << printReg(P.VReg, TRI, i, MRI) << "] ";
else
- dbgs() << printReg(P.VReg) << "]\n";
+ dbgs() << printReg(P.VReg) << "] ";
}
AMDGPUSSASpiller::SpillInfo &
@@ -274,7 +279,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
RegisterSet &Active = Entry.ActiveSet;
RegisterSet &Spilled = Entry.SpillSet;
- for (MachineBasicBlock::iterator I : MBB) {
+ // for (MachineBasicBlock::iterator I : MBB) {
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); I++) {
RegisterSet Reloads;
// T4->startTimer();
for (auto &U : I->uses()) {
@@ -286,12 +292,11 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (!takeReg(VReg))
continue;
-
VRegMaskPair VMP(U, *TRI);
// We don't need to make room for the PHI uses as these operands must
- // already be present in the corresponding predecessor Active set! Just make
- // sure they really are.
+ // already be present in the corresponding predecessor Active set! Just
+ // make sure they really are.
if (I->isPHI()) {
auto OpNo = U.getOperandNo();
auto B = I->getOperand(++OpNo);
@@ -317,10 +322,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (I->isPHI()) {
// We don't need to make room for the PHI-defined values as they will be
- // lowered to the copies at the end of the corresponding predecessors and
- // occupy the same register as the corresponding PHI input value.
- // Nevertheless, we must add them to the Active to indicate their values
- // are available.
+ // lowered to the copies at the end of the corresponding predecessors
+ // and occupy the same register as the corresponding PHI input
+ // value. Nevertheless, we must add them to the Active to indicate their
+ // values are available.
for (auto D : I->defs()) {
Register R = D.getReg();
if (takeReg(R)) {
@@ -352,22 +357,23 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
LLVM_DEBUG(dbgs() << "\nActive set with uses reloaded:\n";
dumpRegSet(Active));
- RegisterSet ToSpill;
- limit(MBB, Active, Spilled, I, NumAvailableRegs, ToSpill);
- limit(MBB, Active, Spilled, std::next(I),
- NumAvailableRegs - getSizeInRegs(Defs), ToSpill);
+
+ limit(MBB, Active, Spilled, I, NumAvailableRegs);
+ unsigned NSpills = limit(MBB, Active, Spilled, std::next(I),
+ NumAvailableRegs - getSizeInRegs(Defs));
+
// T4->startTimer();
- for (auto R : ToSpill) {
- spillBefore(MBB, I, R);
- Spilled.insert(R);
- // FIXME: We'd want to update LIS if we could!
- }
+
Active.insert(Defs.begin(), Defs.end());
// Add reloads for VRegs in Reloads before I
- dumpRegSet(Reloads);
- for (auto R : Reloads)
+ for (auto R : Reloads) {
+ LLVM_DEBUG(dbgs() << "\nReloading "; printVRegMaskPair(R);
+ dbgs() << "\n");
reloadBefore(MBB, I, R);
+ }
+
+ std::advance(I, NSpills);
// T4->stopTimer();
}
// Now, clear dead registers. We generally take care of trimming deads at the
@@ -487,13 +493,9 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// We're about to insert N reloads at the end of the predecessor block.
// Make sure we have enough registers for N definitions or spill to make
// room for them.
- RegisterSet ToSpill;
- limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->end(),
- NumAvailableRegs - getSizeInRegs(ReloadInPred), ToSpill);
- for (auto R : ToSpill) {
- spillBefore(*Pred, Pred->end(), R);
- PE.SpillSet.insert(R);
- }
+ limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->getFirstTerminator(),
+ NumAvailableRegs - getSizeInRegs(ReloadInPred));
+
for (auto R : ReloadInPred) {
reloadAtEnd(*Pred, R);
// FIXME: Do we need to update sets?
@@ -649,8 +651,7 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
int FI = getStackSlot(VMP);
Register NewVReg = MRI->createVirtualRegister(RC);
- TII->loadRegFromStackSlot(MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg,
- SubRegIdx);
+ TII->loadRegFromStackSlot(MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
// FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
MachineInstr &ReloadMI = *(--InsertBefore);
LIS.InsertMachineInstrInMaps(ReloadMI);
@@ -662,24 +663,15 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
// then this, reloaded here.
SmallVector<MachineOperand*> ToUpdate;
for (auto &U : MRI->use_nodbg_operands(VMP.VReg)) {
- // MachineInstr *UseMI = U.getParent();
- // if (MDT.dominates(&ReloadMI, UseMI)) {
- // ToUpdate.push_back(&U);
- // } else if (UseMI->isPHI()) {
- // unsigned OpNo = U.getOperandNo();
- // MachineOperand MBBOp = UseMI->getOperand(++OpNo);
- // assert(MBBOp.isMBB() && "Not PHI instruction or malformed PHI!");
- // MachineBasicBlock *SourceMBB = MBBOp.getMBB();
- // if (SourceMBB == &MBB)
- // ToUpdate.push_back(&U);
- // }
-
-
if (SpillPoints.contains(VMP)) {
MachineInstr *UseMI = U.getParent();
MachineInstr *Spill = SpillPoints[VMP];
- if (UseMI != Spill && MDT.dominates(Spill, UseMI))
+ VRegMaskPair UseVMP(U, *TRI);
+ if (UseMI != Spill && MDT.dominates(Spill, UseMI) && UseVMP == VMP)
ToUpdate.push_back(&U);
+ } else {
+ llvm::report_fatal_error(
+ "We're going to reload VReg which has not been spilled!");
}
}
for (auto U : ToUpdate) {
@@ -713,16 +705,19 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
LIS.InsertMachineInstrInMaps(Spill);
if (LIS.hasInterval(VMP.VReg)) {
- LiveInterval &LI = LIS.getInterval(VMP.VReg);
- SlotIndex KillIdx = LIS.getInstructionIndex(Spill);
- auto LR = LI.find(KillIdx);
- if (LR != LI.end()) {
- SlotIndex Start = LR->start;
- SlotIndex End = LR->end;
- if (Start < KillIdx) {
- LI.removeSegment(KillIdx, End);
- }
- }
+
+ LIS.removeInterval(VMP.VReg);
+
+ // LiveInterval &LI = LIS.getInterval(VMP.VReg);
+ // SlotIndex KillIdx = LIS.getInstructionIndex(Spill);
+ // auto LR = LI.find(KillIdx);
+ // if (LR != LI.end()) {
+ // SlotIndex Start = LR->start;
+ // SlotIndex End = LR->end;
+ // if (Start < KillIdx) {
+ // LI.removeSegment(KillIdx, End);
+ // }
+ // }
}
SpillPoints[VMP] = &Spill;
}
@@ -744,12 +739,14 @@ unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
return MaxRP;
}
-void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
- RegisterSet &Spilled,
- MachineBasicBlock::iterator I, unsigned Limit,
- RegisterSet &ToSpill) {
+unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
+ RegisterSet &Spilled,
+ MachineBasicBlock::iterator I,
+ unsigned Limit) {
// T2->startTimer();
+ unsigned NumSpills = 0;
+
LLVM_DEBUG(dbgs() << "\nIn \"limit\" with Limit = " << Limit << "\n");
Active.remove_if([&](VRegMaskPair P) { return NU.isDead(MBB, I, P); });
@@ -760,12 +757,13 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned CurRP = getSizeInRegs(Active);
if (CurRP <= Limit) {
// T2->stopTimer();
- return;
+ return NumSpills;
}
+
sortRegSetAt(MBB, I, Active);
- LLVM_DEBUG(dbgs() << "\nActive set sorted at" << *I; dumpRegSet(Active));
+ RegisterSet ToSpill;
while (CurRP > Limit) {
auto P = Active.pop_back_val();
@@ -801,7 +799,14 @@ void AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
}
LLVM_DEBUG(dbgs() << "\nActive set after at the end of the \"limit\":\n";
dumpRegSet(Active));
+ for (auto R : ToSpill) {
+ LLVM_DEBUG(dbgs() << "\nSpilling "; printVRegMaskPair(R));
+ spillBefore(MBB, I, R);
+ NumSpills++;
+ Spilled.insert(R);
+ }
// T2->stopTimer();
+ return NumSpills;
}
unsigned AMDGPUSSASpiller::getSizeInRegs(const VRegMaskPair VMP) {
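For readers new to the approach, the reworked limit() above follows the usual Belady-style discipline: sort the active values by next-use distance and evict the farthest ones until the pressure fits, emitting a store only for values that are not already in a stack slot. A compact standalone sketch of that loop with made-up distances (each value is assumed to occupy one register, unlike the real getSizeInRegs accounting):

#include <algorithm>
#include <cstdio>
#include <set>
#include <vector>

struct Value {
  unsigned Reg;     // vreg id
  unsigned NextUse; // distance to the next use
};

int main() {
  unsigned Limit = 2;
  std::vector<Value> Active = {{1, 4}, {2, 12}, {3, 2}, {4, 30}};
  std::set<unsigned> Spilled; // values that already live in a stack slot
  std::vector<unsigned> ToSpill;

  // Closest next use first; the back of the vector is the eviction candidate.
  std::sort(Active.begin(), Active.end(),
            [](const Value &A, const Value &B) { return A.NextUse < B.NextUse; });

  while (Active.size() > Limit) {
    Value V = Active.back();
    Active.pop_back();
    if (!Spilled.count(V.Reg)) // only emit a store the first time
      ToSpill.push_back(V.Reg);
    Spilled.insert(V.Reg);
  }

  for (unsigned R : ToSpill)
    std::printf("spill %%%u\n", R);
  for (const Value &V : Active)
    std::printf("keep  %%%u (next use %u)\n", V.Reg, V.NextUse);
  return 0;
}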
>From f715d7aff620297ed7be148eba93036558e63c1e Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 20 Feb 2025 00:04:49 +0100
Subject: [PATCH 22/46] SSA Spiller unnecessary SubReg parameter removed from
loadRegFromStackSlot
---
llvm/include/llvm/CodeGen/TargetInstrInfo.h | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 8 ++++++--
llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 4 ++--
llvm/lib/Target/AMDGPU/SIInstrInfo.h | 3 +--
llvm/lib/Target/X86/X86InstrInfo.cpp | 2 +-
llvm/lib/Target/X86/X86InstrInfo.h | 3 +--
6 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index f39b327ebc018..58d8feb01c42b 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1178,7 +1178,7 @@ class LLVM_ABI TargetInstrInfo : public MCInstrInfo {
Register DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
- Register VReg, unsigned SubRegIdx = 0) const {
+ Register VReg) const {
llvm_unreachable("Target didn't implement "
"TargetInstrInfo::loadRegFromStackSlot!");
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 5b2eed693bf92..39ba7f155828d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -182,6 +182,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
});
}
+ // Fills Active until it reaches NumAvailableRegs. If @Capacity is passed,
+ // fills exactly that number of regs.
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity = 0);
@@ -218,6 +220,7 @@ AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
dbgs() << "\n";
for (auto P : VMPs) {
printVRegMaskPair(P);
+ dbgs() << "\n";
}
dbgs() << "\n";
}
@@ -676,6 +679,7 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
}
for (auto U : ToUpdate) {
// FIXME: Do we always want "AtEndOfBlock"?
+ U->setSubReg(AMDGPU::NoRegister);
U->setReg(Updater.GetValueAtEndOfBlock(&MBB));
}
LIS.createAndComputeVirtRegInterval(NewVReg);
@@ -827,8 +831,8 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity) {
unsigned Limit = Capacity ? Capacity : NumAvailableRegs;
auto &Active = RegisterMap[MBB.getNumber()].ActiveSet;
- unsigned Size = getSizeInRegs(Active);
- sortRegSetAt(MBB, MBB.begin(), S);
+ unsigned Size = Capacity ? 0 : getSizeInRegs(Active);
+ sortRegSetAt(MBB, MBB.getFirstNonPHI(), S);
for (auto VMP : S) {
unsigned RSize = getSizeInRegs(VMP);
if (Size + RSize <= Limit) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 5b9e2111c4d7e..6854881830d4f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1886,7 +1886,7 @@ unsigned SIInstrInfo::getVectorRegSpillRestoreOpcode(
void SIInstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg, unsigned SubRegIdx) const {
+ const TargetRegisterInfo *TRI, Register VReg) const {
MachineFunction *MF = MBB.getParent();
SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF->getFrameInfo();
@@ -1927,7 +1927,7 @@ void SIInstrInfo::loadRegFromStackSlot(
unsigned Opcode = getVectorRegSpillRestoreOpcode(VReg ? VReg : DestReg, RC,
SpillSize, RI, *MFI);
BuildMI(MBB, MI, DL, get(Opcode))
- .addReg(DestReg, RegState::Define, SubRegIdx)
+ .addReg(DestReg, RegState::Define)
.addFrameIndex(FrameIndex) // vaddr
.addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
.addImm(0) // offset
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 344498a982b87..22c2bcc071bbb 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -294,8 +294,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- unsigned SubRegIdx = 0) const override;
+ const TargetRegisterInfo *TRI, Register VReg) const override;
bool expandPostRAPseudo(MachineInstr &MI) const override;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index aefbe590bba35..9cdcb29125a46 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4807,7 +4807,7 @@ void X86InstrInfo::storeRegToStackSlot(
void X86InstrInfo::loadRegFromStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg,
int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
- Register VReg, unsigned SubRegIdx) const {
+ Register VReg) const {
const MachineFunction &MF = *MBB.getParent();
const MachineFrameInfo &MFI = MF.getFrameInfo();
assert(MFI.getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 07fe1c01e1f00..e090543797498 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -478,8 +478,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, Register DestReg,
int FrameIndex, const TargetRegisterClass *RC,
- const TargetRegisterInfo *TRI, Register VReg,
- unsigned SubRegIdx = 0) const override;
+ const TargetRegisterInfo *TRI, Register VReg) const override;
void loadStoreTileReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned Opc, Register Reg, int FrameIdx,
>From 9d58e7d968ed563bdf2229ee3bbfe40b262ecc04 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Wed, 28 May 2025 16:12:41 +0000
Subject: [PATCH 23/46] SSA Rebuild pass
---
llvm/lib/Target/AMDGPU/AMDGPU.h | 14 +-
llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def | 1 +
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 240 ++++++++++++++++++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 4 +-
.../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp | 1 +
llvm/lib/Target/AMDGPU/CMakeLists.txt | 1 +
6 files changed, 258 insertions(+), 3 deletions(-)
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.h b/llvm/lib/Target/AMDGPU/AMDGPU.h
index 32797cb0393c6..5c8679933e2f7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.h
@@ -40,6 +40,8 @@ FunctionPass *createSIFoldOperandsLegacyPass();
FunctionPass *createSIPeepholeSDWALegacyPass();
FunctionPass *createSILowerI1CopiesLegacyPass();
FunctionPass *createAMDGPUSSASpillerLegacyPass();
+FunctionPass *createAMDGPURebuildSSALegacyPass();
+FunctionPass *createAMDGPURebuildSSAPass();
FunctionPass *createAMDGPUGlobalISelDivergenceLoweringPass();
FunctionPass *createSIShrinkInstructionsLegacyPass();
FunctionPass *createSILoadStoreOptimizerLegacyPass();
@@ -103,6 +105,13 @@ class AMDGPUSSASpillerPass : public PassInfoMixin<AMDGPUSSASpillerPass> {
MachineFunctionAnalysisManager &MFAM);
};
+class AMDGPURebuildSSAPass : public PassInfoMixin<AMDGPURebuildSSAPass> {
+public:
+ AMDGPURebuildSSAPass() = default;
+ PreservedAnalyses run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM);
+};
+
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &);
void initializeAMDGPUAlwaysInlinePass(PassRegistry&);
@@ -210,7 +219,10 @@ extern char &SILowerI1CopiesLegacyID;
void initializeAMDGPUSSASpillerLegacyPass(PassRegistry &);
extern char &AMDGPUSSASpillerLegacyID;
-void initializeAMDGPUNextUseAnalysisWrapperPass(PassRegistry&);
+void initializeAMDGPURebuildSSALegacyPass(PassRegistry &);
+extern char &AMDGPURebuildSSALegacyID;
+
+void initializeAMDGPUNextUseAnalysisWrapperPass(PassRegistry &);
extern char &AMDGPUNextUseAnalysisID;
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
index 783257f4b267c..ad626867a4e5e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPassRegistry.def
@@ -142,6 +142,7 @@ MACHINE_FUNCTION_PASS("si-pre-allocate-wwm-regs", SIPreAllocateWWMRegsPass())
MACHINE_FUNCTION_PASS("si-pre-emit-peephole", SIPreEmitPeepholePass())
MACHINE_FUNCTION_PASS("si-shrink-instructions", SIShrinkInstructionsPass())
MACHINE_FUNCTION_PASS("amdgpu-ssa-spiller", AMDGPUSSASpillerPass())
+MACHINE_FUNCTION_PASS("amdgpu-rebuild-ssa", AMDGPURebuildSSAPass())
#undef MACHINE_FUNCTION_PASS
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, CREATE_PASS)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
new file mode 100644
index 0000000000000..ba4c81ef60fc5
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -0,0 +1,240 @@
+#include "AMDGPU.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/Support/GenericIteratedDominanceFrontier.h"
+#include "GCNSubtarget.h"
+
+#include <stack>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "amdgpu-rebuild-ssa"
+
+namespace {
+
+class AMDGPURebuildSSALegacy : public MachineFunctionPass {
+ LiveIntervals *LIS;
+ MachineDominatorTree *MDT;
+ const SIInstrInfo *TII;
+ const SIRegisterInfo *TRI;
+ MachineRegisterInfo *MRI;
+
+ SetVector<unsigned> CrossBlockVRegs;
+ DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
+ DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
+ DenseMap<unsigned, SmallSet<unsigned, 4>> PHINodes;
+ DenseMap<unsigned, std::stack<unsigned>> VregNames;
+ DenseSet<unsigned> DefSeen;
+
+ void collectCrossBlockVRegs(MachineFunction &MF);
+ void findPHINodesPlacement(const SmallPtrSetImpl<MachineBasicBlock *> &LiveInBlocks,
+ const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks,
+ SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) {
+
+ IDFCalculatorBase<MachineBasicBlock, false> IDF(MDT->getBase());
+
+ IDF.setLiveInBlocks(LiveInBlocks);
+ IDF.setDefiningBlocks(DefBlocks);
+ IDF.calculate(PHIBlocks);
+ }
+
+ void renameVRegs(MachineBasicBlock &MBB) {
+ for (auto &PHI : MBB.phis()) {
+ Register Res = PHI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = TRI->getRegClass(Res);
+ Register NewVReg = MRI->createVirtualRegister(RC);
+ PHI.getOperand(0).setReg(NewVReg);
+ VregNames[Res].push(NewVReg);
+ DefSeen.insert(NewVReg);
+ }
+ for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
+
+ for (auto Op : I.uses()) {
+ if (Op.isReg() && Op.getReg().isVirtual()) {
+ unsigned VReg = Op.getReg();
+ if (VregNames[VReg].empty()) {
+ // If no new name is available, use the original VReg.
+ continue;
+ }
+ unsigned NewVReg = VregNames[VReg].top();
+ //VregNames[VReg].pop();
+ Op.setReg(NewVReg);
+ }
+ }
+
+ for (auto Op : I.defs()) {
+ if (Op.getReg().isVirtual()) {
+ unsigned VReg = Op.getReg();
+ if (DefSeen.contains(VReg)) {
+ const TargetRegisterClass *RC = TRI->getRegClass(VReg);
+ Register NewVReg = MRI->createVirtualRegister(RC);
+ Op.setReg(NewVReg);
+ VregNames[VReg].push(NewVReg);
+ } else {
+ DefSeen.insert(VReg);
+ }
+ }
+ }
+ }
+
+ for (auto Succ : successors(&MBB)) {
+ for (auto &PHI : Succ->phis()) {
+ Register Res = PHI.getOperand(0).getReg();
+ if (VregNames[Res].empty()) {
+ PHI.addOperand(MachineOperand::CreateReg(Res, false));
+ } else {
+ PHI.addOperand(
+ MachineOperand::CreateReg(VregNames[Res].top(), false));
+ }
+ PHI.addOperand(MachineOperand::CreateMBB(&MBB));
+ }
+ }
+ // recurse to the succs in DomTree
+ DomTreeNodeBase<MachineBasicBlock> *Node = MDT->getNode(&MBB);
+ for (auto *Child : Node->children()) {
+ MachineBasicBlock *ChildMBB = Child->getBlock();
+ // Process child in the dominator tree
+ renameVRegs(*ChildMBB);
+ }
+
+ for (auto &I : MBB) {
+ for (auto Op : I.defs()) {
+ if (Op.getReg().isVirtual()) {
+ Register VReg = Op.getReg();
+ VregNames[VReg].pop();
+ }
+ }
+ }
+ }
+
+public:
+ static char ID;
+ AMDGPURebuildSSALegacy() : MachineFunctionPass(ID) {
+ initializeAMDGPURebuildSSALegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequiredTransitiveID(MachineDominatorsID);
+ AU.addPreservedID(MachineDominatorsID);
+ AU.addRequired<LiveIntervalsWrapperPass>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+};
+
+} // end anonymous namespace
+
+void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
+ for (auto &MBB : MF) {
+ SetVector<unsigned> Killed;
+ for (auto &I : MBB) {
+ for (auto Op : I.uses()) {
+ if (Op.isReg() && Op.getReg().isVirtual() &&
+ !Killed.contains(Op.getReg())) {
+ CrossBlockVRegs.insert(Op.getReg());
+ LiveInBlocks[Op.getReg()].insert(&MBB);
+ }
+ }
+ for (auto Op : I.defs()) {
+ if (Op.isReg() && Op.getReg().isVirtual()) {
+ Killed.insert(Op.getReg());
+ DefBlocks[Op.getReg()].insert(&MBB);
+ }
+ }
+ }
+ }
+}
+
+bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
+ LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
+ MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
+ TII = MF.getSubtarget<GCNSubtarget>().getInstrInfo();
+ MRI = &MF.getRegInfo();
+ TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
+
+ CrossBlockVRegs.clear();
+ DefBlocks.clear();
+ LiveInBlocks.clear();
+ PHINodes.clear();
+ VregNames.clear();
+ DefSeen.clear();
+ // for (auto &MBB : MF) {
+ // PHINodes[MBB.getNumber()] = SmallSet<unsigned, 4>();
+ // }
+ // Collect all cross-block virtual registers.
+ // This includes registers that are live-in to the function, and registers
+ // that are defined in multiple blocks.
+ // We will insert PHI nodes for these registers.
+ collectCrossBlockVRegs(MF);
+ for (auto VReg : CrossBlockVRegs) {
+ SmallVector<MachineBasicBlock *> PHIBlocks;
+ findPHINodesPlacement(LiveInBlocks[VReg], DefBlocks[VReg], PHIBlocks);
+ for (auto MBB : PHIBlocks) {
+ if (!PHINodes[MBB->getNumber()].contains(VReg)) {
+ // Insert PHI for VReg. Don't use new VReg here as we'll replace them in
+ // renaming phase.
+ BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
+ .addReg(VReg, RegState::Define);
+ PHINodes[MBB->getNumber()].insert(VReg);
+ }
+ }
+ }
+
+ return false;
+}
+
+char AMDGPURebuildSSALegacy::ID = 0;
+
+INITIALIZE_PASS_BEGIN(AMDGPURebuildSSALegacy, DEBUG_TYPE, "AMDGPU Rebuild SSA",
+ false, false)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
+INITIALIZE_PASS_END(AMDGPURebuildSSALegacy, DEBUG_TYPE, "AMDGPU Rebuild SSA",
+ false, false)
+
+// Legacy PM registration
+FunctionPass *llvm::createAMDGPURebuildSSALegacyPass() {
+ return new AMDGPURebuildSSALegacy();
+}
+
+PreservedAnalyses
+llvm::AMDGPURebuildSSAPass::run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &MFAM) {
+ AMDGPURebuildSSALegacy Impl;
+ bool Changed = Impl.runOnMachineFunction(MF);
+ if (!Changed)
+ return PreservedAnalyses::all();
+
+ // TODO: We could detect whether the CFG changed.
+ auto PA = getMachineFunctionPassPreservedAnalyses();
+ return PA;
+}
+
+llvm::PassPluginLibraryInfo getAMDGPURebuildSSAPassPluginInfo() {
+ return {LLVM_PLUGIN_API_VERSION, "AMDGPURebuildSSA", LLVM_VERSION_STRING,
+ [](PassBuilder &PB) {
+ PB.registerPipelineParsingCallback(
+ [](StringRef Name, MachineFunctionPassManager &PM,
+ ArrayRef<PassBuilder::PipelineElement>) {
+ if (Name == "amdgpu-rebuild-ssa") {
+ PM.addPass(AMDGPURebuildSSAPass());
+ return true;
+ }
+ return false;
+ });
+ }};
+}
+
+// Expose the pass to LLVM’s pass manager infrastructure
+extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo
+llvmGetPassPluginInfo() {
+ return getAMDGPURebuildSSAPassPluginInfo();
+}
\ No newline at end of file
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 39ba7f155828d..7f0d3165b775b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -943,7 +943,7 @@ FunctionPass *llvm::createAMDGPUSSASpillerLegacyPass() {
return new AMDGPUSSASpillerLegacy();
}
-llvm::PassPluginLibraryInfo getMyNewMachineFunctionPassPluginInfo() {
+llvm::PassPluginLibraryInfo getAMDGPUSSASpillerPassPluginInfo() {
return {LLVM_PLUGIN_API_VERSION, "AMDGPUSSASpiller",
LLVM_VERSION_STRING, [](PassBuilder &PB) {
PB.registerPipelineParsingCallback(
@@ -961,5 +961,5 @@ llvm::PassPluginLibraryInfo getMyNewMachineFunctionPassPluginInfo() {
// Expose the pass to LLVM’s pass manager infrastructure
extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo
llvmGetPassPluginInfo() {
- return getMyNewMachineFunctionPassPluginInfo();
+ return getAMDGPUSSASpillerPassPluginInfo();
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 2e27343e032e3..aad4699a635e3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -545,6 +545,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
initializeGCNDPPCombineLegacyPass(*PR);
initializeSILowerI1CopiesLegacyPass(*PR);
initializeAMDGPUSSASpillerLegacyPass(*PR);
+ initializeAMDGPURebuildSSALegacyPass(*PR);
initializeAMDGPUGlobalISelDivergenceLoweringPass(*PR);
initializeAMDGPURegBankSelectPass(*PR);
initializeAMDGPURegBankLegalizePass(*PR);
diff --git a/llvm/lib/Target/AMDGPU/CMakeLists.txt b/llvm/lib/Target/AMDGPU/CMakeLists.txt
index 4bb3112d3add9..2e03f4c6b852a 100644
--- a/llvm/lib/Target/AMDGPU/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/CMakeLists.txt
@@ -110,6 +110,7 @@ add_llvm_target(AMDGPUCodeGen
AMDGPUSetWavePriority.cpp
AMDGPUSplitModule.cpp
AMDGPUSSASpiller.cpp
+ AMDGPURebuildSSA.cpp
AMDGPUNextUseAnalysis.cpp
AMDGPUSubtarget.cpp
AMDGPUTargetMachine.cpp
>From a03cd3ef1503431cee5138d6f6a63935c354df96 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 29 May 2025 00:34:12 +0200
Subject: [PATCH 24/46] SSA Rebuild pass. Fixes.
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 59 +++++++++++++++++----
1 file changed, 50 insertions(+), 9 deletions(-)
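For context: the renaming performed by this pass is a variant of the classic Cytron et al. scheme, with one stack of current names per original vreg that is pushed at definitions and popped when the dominator-tree walk unwinds. A minimal sketch of that shape (the helper names below are illustrative, not part of this patch):

    // Sketch only: per-vreg name stacks driven by a dominator-tree walk.
    void renameDomTree(MachineDomTreeNode *Node,
                       DenseMap<unsigned, std::stack<unsigned>> &Names) {
      MachineBasicBlock *MBB = Node->getBlock();
      SmallVector<unsigned, 8> Pushed;                // vregs whose stacks grew here
      rewriteUsesAndDefsInBlock(*MBB, Names, Pushed); // uses read Names[V].top()
      addSuccessorPHIOperands(*MBB, Names);           // fill PHI inputs for this edge
      for (MachineDomTreeNode *Child : Node->children())
        renameDomTree(Child, Names);
      for (unsigned V : Pushed)                       // unwind on the way out
        Names[V].pop();
    }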
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index ba4c81ef60fc5..29c3e2e75beff 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -30,6 +30,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
DenseMap<unsigned, SmallSet<unsigned, 4>> PHINodes;
+ DenseMap<MachineInstr *, unsigned> PHIMap;
DenseMap<unsigned, std::stack<unsigned>> VregNames;
DenseSet<unsigned> DefSeen;
@@ -48,7 +49,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
void renameVRegs(MachineBasicBlock &MBB) {
for (auto &PHI : MBB.phis()) {
Register Res = PHI.getOperand(0).getReg();
- const TargetRegisterClass *RC = TRI->getRegClass(Res);
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, Res);
Register NewVReg = MRI->createVirtualRegister(RC);
PHI.getOperand(0).setReg(NewVReg);
VregNames[Res].push(NewVReg);
@@ -56,7 +57,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
- for (auto Op : I.uses()) {
+ for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual()) {
unsigned VReg = Op.getReg();
if (VregNames[VReg].empty()) {
@@ -69,11 +70,11 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
}
- for (auto Op : I.defs()) {
+ for (auto &Op : I.defs()) {
if (Op.getReg().isVirtual()) {
unsigned VReg = Op.getReg();
if (DefSeen.contains(VReg)) {
- const TargetRegisterClass *RC = TRI->getRegClass(VReg);
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
Register NewVReg = MRI->createVirtualRegister(RC);
Op.setReg(NewVReg);
VregNames[VReg].push(NewVReg);
@@ -86,7 +87,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto Succ : successors(&MBB)) {
for (auto &PHI : Succ->phis()) {
- Register Res = PHI.getOperand(0).getReg();
+ Register Res = PHIMap[&PHI];
if (VregNames[Res].empty()) {
PHI.addOperand(MachineOperand::CreateReg(Res, false));
} else {
@@ -108,7 +109,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto Op : I.defs()) {
if (Op.getReg().isVirtual()) {
Register VReg = Op.getReg();
- VregNames[VReg].pop();
+ if (!VregNames[VReg].empty())
+ VregNames[VReg].pop();
}
}
}
@@ -174,20 +176,59 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
// that are defined in multiple blocks.
// We will insert PHI nodes for these registers.
collectCrossBlockVRegs(MF);
+
+ LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
+ for (auto VReg : CrossBlockVRegs) {
+ dbgs() << Register::virtReg2Index(VReg) << " ";
+ } dbgs()
+ << "\n");
+
for (auto VReg : CrossBlockVRegs) {
SmallVector<MachineBasicBlock *> PHIBlocks;
+
+ LLVM_DEBUG(
+ dbgs() << "findPHINodesPlacement input:\nVreg: "
+ << Register::virtReg2Index(VReg) << "\n";
+ dbgs() << "Def Blocks: \n"; for (auto MBB : DefBlocks[VReg]) {
+ dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
+ } dbgs() << "\nLiveIn Blocks: \n";
+ for (auto MBB : LiveInBlocks[VReg]) {
+ dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
+ } dbgs()
+ << "\n");
+
findPHINodesPlacement(LiveInBlocks[VReg], DefBlocks[VReg], PHIBlocks);
+ LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n";
+ for (auto MBB : PHIBlocks) {
+ dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
+ } dbgs()
+ << "\n");
for (auto MBB : PHIBlocks) {
if (!PHINodes[MBB->getNumber()].contains(VReg)) {
- // Insert PHI for VReg. Don't use new VReg here as we'll replace them in
- // renaming phase.
- BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
+ // Insert PHI for VReg. Don't use new VReg here as we'll replace them
+ // in renaming phase.
+ auto PHINode = BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
.addReg(VReg, RegState::Define);
PHINodes[MBB->getNumber()].insert(VReg);
+ PHIMap[PHINode] = VReg;
}
}
}
+ // Rename virtual registers in the basic block.
+ renameVRegs(MF.front());
+ LLVM_DEBUG(dbgs() << "##### Vreg names after renaming ##################\n";
+ for (auto &Pair : VregNames) {
+ dbgs() << Register::virtReg2Index(Pair.first) << ": ";
+ if (Pair.second.empty()) {
+ dbgs() << "empty";
+ } else {
+ dbgs() << Pair.second.top();
+ }
+ dbgs() << "\n";
+ } dbgs()
+ << "\n");
+
return false;
}
>From d92452b6bcf35aed20dde11f2db6aa036aaae370 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Wed, 11 Jun 2025 19:22:50 +0200
Subject: [PATCH 25/46] SSA Rebuild pass finished. SSARAUtils added. A lot of
bugfixing done
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 9 +-
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 19 +-
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 150 ++++++++++----
llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h | 54 +++++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 193 ++++++++++++------
5 files changed, 322 insertions(+), 103 deletions(-)
create mode 100644 llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 969049d939d61..c2a188eb79fc7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -111,12 +111,13 @@ void NextUseResult::analyze(const MachineFunction &MF) {
if (ValueSrc->getNumber() == MBB->getNumber()) {
// We assume that all the PHIs have zero distance from the
// succ end!
- Curr.insert({U.getReg(), LaneBitmask::getAll()}, 0);
+ Curr.insert(VRegMaskPair(U, TRI, MRI), 0);
}
}
}
- for (auto &U : PHI.defs())
- Curr.clear({U.getReg(), LaneBitmask::getAll()});
+ for (auto &U : PHI.defs()) {
+ Curr.clear(VRegMaskPair(U, TRI, MRI));
+ }
}
}
@@ -142,7 +143,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.getReg().isVirtual()) {
- VRegMaskPair P(MO, *TRI);
+ VRegMaskPair P(MO, TRI, MRI);
if (MO.isUse()) {
Curr.insert(P, 0);
UsedInBlock[MBB->getNumber()].insert(P);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 5cc0d8faf154b..7ff76b52c2d49 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -16,6 +16,7 @@
#include "SIRegisterInfo.h"
#include "GCNSubtarget.h"
+#include "AMDGPUSSARAUtils.h"
#include <algorithm>
#include <limits>
@@ -33,15 +34,16 @@ struct VRegMaskPair {
VRegMaskPair(Register VReg, LaneBitmask LaneMask)
: VReg(VReg), LaneMask(LaneMask) {}
- VRegMaskPair(const MachineOperand MO, const TargetRegisterInfo &TRI) {
+ VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI) {
assert(MO.isReg() && "Not a register operand!");
Register R = MO.getReg();
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, R);
assert(R.isVirtual() && "Not a virtual register!");
VReg = R;
- LaneMask = LaneBitmask::getAll();
+ LaneMask = getFullMaskForRC(*RC, TRI);
unsigned subRegIndex = MO.getSubReg();
if (subRegIndex) {
- LaneMask = TRI.getSubRegIndexLaneMask(subRegIndex);
+ LaneMask = TRI->getSubRegIndexLaneMask(subRegIndex);
}
}
@@ -305,6 +307,17 @@ class NextUseResult {
const VRegMaskPair VMP) {
if (!VMP.VReg.isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
+ // FIXME: We use the same Infinity value to indicate both an invalid distance
+ // and a distance too long for out-of-block values. This is fine if the
+ // out-of-block use is at least one instruction further than the end of the
+ // loop exit: the distance is then Infinity + 1 and the register is not
+ // considered dead. But what if the register is defined by the last
+ // instruction in the loop exit block and the out-of-loop use is in a PHI?
+ // By design the distance of all PHIs from the beginning of their block is
+ // ZERO, so the distance of the out-of-loop use will be exactly Infinity and
+ // the register will be mistakenly considered DEAD! On the other hand, any
+ // predecessor of the block containing the PHI must end with a branch
+ // instruction, so in this case the current design works.
return I == MBB.end() ? getNextUseDistance(MBB, VMP) == Infinity
: getNextUseDistance(I, VMP) == Infinity;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 29c3e2e75beff..971b411f33b75 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -1,4 +1,5 @@
#include "AMDGPU.h"
+#include "GCNSubtarget.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -9,7 +10,7 @@
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/GenericIteratedDominanceFrontier.h"
-#include "GCNSubtarget.h"
+#include "AMDGPUSSARAUtils.h"
#include <stack>
@@ -26,12 +27,21 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
const SIRegisterInfo *TRI;
MachineRegisterInfo *MRI;
+ typedef struct {
+ Register CurName;
+ LaneBitmask PrevMask;
+ unsigned PrevSubRegIdx;
+ MachineInstr *DefMI;
+ } CurVRegInfo;
+
+ using VRegDefStack = std::vector<CurVRegInfo>;
+
SetVector<unsigned> CrossBlockVRegs;
DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
DenseMap<unsigned, SmallSet<unsigned, 4>> PHINodes;
DenseMap<MachineInstr *, unsigned> PHIMap;
- DenseMap<unsigned, std::stack<unsigned>> VregNames;
+ DenseMap<unsigned, VRegDefStack> VregNames;
DenseSet<unsigned> DefSeen;
void collectCrossBlockVRegs(MachineFunction &MF);
@@ -52,21 +62,84 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, Res);
Register NewVReg = MRI->createVirtualRegister(RC);
PHI.getOperand(0).setReg(NewVReg);
- VregNames[Res].push(NewVReg);
+ VregNames[Res].push_back(
+ {NewVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, &PHI});
DefSeen.insert(NewVReg);
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
-
+
+ // TODO: Need to support the RENAMED set to avoid replacing the registers
+ // which were not renamed in uses!
for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual()) {
unsigned VReg = Op.getReg();
- if (VregNames[VReg].empty()) {
- // If no new name is available, use the original VReg.
+ assert(!VregNames[VReg].empty() &&
+ "Error: use does not dominated by definition!\n");
+ CurVRegInfo VRInfo = VregNames[VReg].back();
+ unsigned CurVReg = VRInfo.CurName;
+ // Does it meet the TODO above ?
+ if (CurVReg == VReg)
continue;
+ unsigned DefSubregIdx = VRInfo.PrevSubRegIdx;
+ LaneBitmask DefMask = VRInfo.PrevMask;
+ MachineInstr *DefMI = VregNames[VReg].back().DefMI;
+ MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg,
+ TRI);
+
+ // LaneBitmask DefMask = getOperandLaneMask(*DefOp);
+ dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
+ dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
+ LaneBitmask UndefSubRegs = UseMask & ~DefMask;
+ dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
+
+ unsigned SubRegIdx = AMDGPU::NoRegister;
+
+ if (UndefSubRegs.any()) {
+ // The closest Def defines not all the subregs used here!
+ SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
+
+ RegSeqOps.push_back({CurVReg, DefOp->getSubReg(), DefSubregIdx});
+
+ VRegDefStack VregDefs = VregNames[VReg];
+
+ VRegDefStack::reverse_iterator It = ++VregDefs.rbegin();
+ for (; It != VregDefs.rend(); ++It) {
+ // auto CurDef = It->CurDefMI;
+ auto R = It->CurName;
+ // auto CurDefOp = CurDef->findRegisterDefOperand(R, TRI);
+ LaneBitmask DefMask = It->PrevMask;
+ dbgs() << "Lanes defined for VReg before renaming : "
+ << PrintLaneMask(DefMask) << "\n";
+ LaneBitmask CurDefinedBits = DefMask & UndefSubRegs;
+ dbgs() << "Defined bits are : " << PrintLaneMask(CurDefinedBits)
+ << "\n";
+
+ if (unsigned SubRegIdx = getSubRegIndexForLaneMask(CurDefinedBits, TRI))
+ RegSeqOps.push_back({R, SubRegIdx, SubRegIdx});
+ // clear subregs for which definition is found
+ UndefSubRegs &= ~CurDefinedBits;
+ dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
+ if (UndefSubRegs.none())
+ break;
+ }
+ // All subreg defs are found. Insert REG_SEQUENCE.
+ auto *RC = TRI->getRegClassForOperandReg(*MRI, Op);
+ CurVReg = MRI->createVirtualRegister(RC);
+ auto RS = BuildMI(MBB, I, I.getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
+ CurVReg);
+ for (auto O : RegSeqOps) {
+ auto [R, SrcSubreg, DstSubreg] = O;
+ RS.addReg(R, 0, SrcSubreg);
+ RS.addImm(DstSubreg);
+ }
+ } else {
+ if ((DefMask | UseMask) != UseMask) {
+ SubRegIdx = getSubRegIndexForLaneMask(UseMask & DefMask, TRI);
+ }
}
- unsigned NewVReg = VregNames[VReg].top();
- //VregNames[VReg].pop();
- Op.setReg(NewVReg);
+ Op.setReg(CurVReg);
+ Op.setSubReg(SubRegIdx);
}
}
@@ -74,11 +147,21 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
if (Op.getReg().isVirtual()) {
unsigned VReg = Op.getReg();
if (DefSeen.contains(VReg)) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForOperandReg(*MRI, Op);
Register NewVReg = MRI->createVirtualRegister(RC);
- Op.setReg(NewVReg);
- VregNames[VReg].push(NewVReg);
+ VregNames[VReg].push_back({NewVReg,
+ getOperandLaneMask(Op, TRI, MRI),
+ Op.getSubReg(), &I});
+
+ Op.ChangeToRegister(NewVReg, true, false, false, false, false);
+ Op.setSubReg(AMDGPU::NoRegister);
+ LLVM_DEBUG(dbgs()
+ << "Renaming VReg: " << Register::virtReg2Index(VReg)
+ << " to " << Register::virtReg2Index(NewVReg) << "\n");
} else {
+ VregNames[VReg].push_back(
+ {VReg, getOperandLaneMask(Op, TRI, MRI), Op.getSubReg(), &I});
DefSeen.insert(VReg);
}
}
@@ -87,12 +170,17 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto Succ : successors(&MBB)) {
for (auto &PHI : Succ->phis()) {
- Register Res = PHIMap[&PHI];
- if (VregNames[Res].empty()) {
- PHI.addOperand(MachineOperand::CreateReg(Res, false));
+ Register VReg = PHIMap[&PHI];
+ if (VregNames[VReg].empty()) {
+ PHI.addOperand(MachineOperand::CreateReg(VReg, false, false, false,
+ false, false));
} else {
- PHI.addOperand(
- MachineOperand::CreateReg(VregNames[Res].top(), false));
+ CurVRegInfo VRInfo = VregNames[VReg].back();
+ MachineInstr *DefMI = VregNames[VReg].back().DefMI;
+ MachineOperand *DefOp = DefMI->findRegisterDefOperand(VRInfo.CurName, TRI);
+ PHI.addOperand(MachineOperand::CreateReg(VRInfo.CurName, false, false,
+ false, false, false, false,
+ DefOp->getSubReg()));
}
PHI.addOperand(MachineOperand::CreateMBB(&MBB));
}
@@ -110,7 +198,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
if (Op.getReg().isVirtual()) {
Register VReg = Op.getReg();
if (!VregNames[VReg].empty())
- VregNames[VReg].pop();
+ VregNames[VReg].pop_back();
}
}
}
@@ -142,7 +230,6 @@ void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
if (Op.isReg() && Op.getReg().isVirtual() &&
!Killed.contains(Op.getReg())) {
CrossBlockVRegs.insert(Op.getReg());
- LiveInBlocks[Op.getReg()].insert(&MBB);
}
}
for (auto Op : I.defs()) {
@@ -168,9 +255,7 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
PHINodes.clear();
VregNames.clear();
DefSeen.clear();
- // for (auto &MBB : MF) {
- // PHINodes[MBB.getNumber()] = SmallSet<unsigned, 4>();
- // }
+
// Collect all cross-block virtual registers.
// This includes registers that are live-in to the function, and registers
// that are defined in multiple blocks.
@@ -185,6 +270,11 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
for (auto VReg : CrossBlockVRegs) {
SmallVector<MachineBasicBlock *> PHIBlocks;
+ for (auto &MBB : MF) {
+ LiveRange &LR = LIS->getInterval(VReg);
+ if (LIS->isLiveInToMBB(LR, &MBB))
+ LiveInBlocks[VReg].insert(&MBB);
+ }
LLVM_DEBUG(
dbgs() << "findPHINodesPlacement input:\nVreg: "
@@ -217,19 +307,9 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
// Rename virtual registers in the basic block.
renameVRegs(MF.front());
- LLVM_DEBUG(dbgs() << "##### Vreg names after renaming ##################\n";
- for (auto &Pair : VregNames) {
- dbgs() << Register::virtReg2Index(Pair.first) << ": ";
- if (Pair.second.empty()) {
- dbgs() << "empty";
- } else {
- dbgs() << Pair.second.top();
- }
- dbgs() << "\n";
- } dbgs()
- << "\n");
-
- return false;
+ MF.getProperties().set(MachineFunctionProperties::Property::IsSSA);
+ MF.getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
+ return MRI->isSSA();
}
char AMDGPURebuildSSALegacy::ID = 0;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
new file mode 100644
index 0000000000000..dcb187da928b2
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
@@ -0,0 +1,54 @@
+//===- AMDGPUSSARAUtils.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AMDGPU_SSA_RA_UTILS_H
+#define LLVM_LIB_TARGET_AMDGPU_SSA_RA_UTILS_H
+
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "SIRegisterInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+using namespace llvm;
+
+inline LaneBitmask getFullMaskForRC(const TargetRegisterClass &RC,
+ const SIRegisterInfo *TRI) {
+ unsigned Size = TRI->getRegSizeInBits(RC);
+ uint64_t IntMask = LaneBitmask::getAll().getAsInteger();
+ return LaneBitmask(IntMask >> (LaneBitmask::BitWidth - Size / 16));
+}
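+// For example (assuming the usual AMDGPU lane granularity, where each 32-bit
+// sub-register covers two 16-bit lanes), a hypothetical caller would get:
+//   getFullMaskForRC(AMDGPU::VGPR_32RegClass, TRI)  -> LaneBitmask(0x3)
+//   getFullMaskForRC(AMDGPU::VReg_64RegClass, TRI)  -> LaneBitmask(0xF)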
+
+inline LaneBitmask getFullMaskForRegOp(const MachineOperand &MO,
+ const SIRegisterInfo *TRI,
+ MachineRegisterInfo *MRI) {
+ assert(MO.isReg() && MO.getReg().isVirtual() &&
+ "Error: MachineOperand must be a virtual register!\n");
+ const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, MO);
+ return getFullMaskForRC(*RC, TRI);
+}
+
+inline LaneBitmask getOperandLaneMask(const MachineOperand &MO,
+ const SIRegisterInfo *TRI,
+ MachineRegisterInfo *MRI) {
+ assert(MO.isReg() && MO.getReg().isVirtual() &&
+ "Error: Only virtual register allowed!\n");
+ if (MO.getSubReg())
+ return TRI->getSubRegIndexLaneMask(MO.getSubReg());
+ return getFullMaskForRegOp(MO, TRI, MRI);
+}
+
+inline unsigned getSubRegIndexForLaneMask(LaneBitmask Mask,
+ const SIRegisterInfo *TRI) {
+ for (unsigned Idx = 1; Idx < TRI->getNumSubRegIndices(); ++Idx) {
+ if (TRI->getSubRegIndexLaneMask(Idx) == Mask)
+ return Idx;
+ }
+ return AMDGPU::NoRegister;
+}
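+// For example (assuming the standard AMDGPU sub-register lane masks), a
+// hypothetical caller would get AMDGPU::sub0 for LaneBitmask(0x3) and
+// AMDGPU::sub1 for LaneBitmask(0xC); AMDGPU::NoRegister is returned when no
+// single index matches the mask exactly.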
+#endif // LLVM_LIB_TARGET_AMDGPU_SSA_RA_UTILS_H
\ No newline at end of file
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 7f0d3165b775b..3c26a842804a1 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -14,6 +14,7 @@
#include "llvm/Target/TargetMachine.h"
#include "AMDGPUNextUseAnalysis.h"
+#include "AMDGPUSSARAUtils.h"
#include "GCNRegPressure.h"
using namespace llvm;
@@ -41,6 +42,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
DenseMap<VRegMaskPair, MachineInstr *> SpillPoints;
+ DenseSet<unsigned> ProcessedBlocks;
LLVM_ATTRIBUTE_NOINLINE void dumpRegSet(SetVector<VRegMaskPair> VMPs);
@@ -54,10 +56,11 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
return SS;
}
+ // Return the existing stack slot if there is one, or assign a new one.
unsigned assignVirt2StackSlot(VRegMaskPair VMP) {
assert(VMP.VReg.isVirtual());
- assert(!Virt2StackSlotMap.contains(VMP) &&
- "attempt to assign stack slot to already spilled register");
+ if (Virt2StackSlotMap.contains(VMP))
+ return Virt2StackSlotMap[VMP];
const TargetRegisterClass *RC = MRI->getRegClass(VMP.VReg);
return Virt2StackSlotMap[VMP] = createSpillSlot(RC);
}
@@ -227,15 +230,15 @@ AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
LLVM_ATTRIBUTE_NOINLINE void
AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
- SmallVector<unsigned> Idxs;
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
- bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, P.LaneMask, Idxs);
+ LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
dbgs() << "Vreg: [";
- if (HasSubReg)
- for (auto i : Idxs)
- dbgs() << printReg(P.VReg, TRI, i, MRI) << "] ";
- else
+ if (P.LaneMask == FullMask) {
dbgs() << printReg(P.VReg) << "] ";
+ } else {
+ unsigned SubRegIndex = getSubRegIndexForLaneMask(P.LaneMask, TRI);
+ dbgs() << printReg(P.VReg, TRI, SubRegIndex, MRI) << "] ";
+ }
}
AMDGPUSSASpiller::SpillInfo &
@@ -260,6 +263,7 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
connectToPredecessors(*MBB);
// T3->stopTimer();
processBlock(*MBB);
+ ProcessedBlocks.insert(MBB->getNumber());
// dump();
// We process loop blocks twice: once with Spill/Active sets of
// loop latch blocks unknown, and then again as soon as the latch blocks
@@ -274,6 +278,7 @@ void AMDGPUSSASpiller::processFunction(MachineFunction &MF) {
PostponedLoopLatches.erase(MBB->getNumber());
}
}
+ ProcessedBlocks.clear();
// T1->stopTimer();
}
@@ -295,7 +300,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (!takeReg(VReg))
continue;
- VRegMaskPair VMP(U, *TRI);
+ VRegMaskPair VMP(U, TRI, MRI);
// We don't need to make room for the PHI uses as their operands must
// already be present in the corresponding predecessor's Active set! Just
@@ -305,9 +310,12 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
auto B = I->getOperand(++OpNo);
assert(B.isMBB());
MachineBasicBlock *ValueSrc = B.getMBB();
- if (MDT.properlyDominates(ValueSrc, &MBB)) {
+
+ if (ProcessedBlocks.contains(ValueSrc->getNumber())) {
+ auto Info = getBlockInfo(*ValueSrc);
+ dumpRegSet(Info.ActiveSet);
assert(getBlockInfo(*ValueSrc).ActiveSet.contains(VMP) &&
- "PHI node input value is not live ougt predecessor!");
+ "PHI node input value is not live out predecessor!");
}
continue;
}
@@ -332,7 +340,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (auto D : I->defs()) {
Register R = D.getReg();
if (takeReg(R)) {
- Active.insert({R, LaneBitmask::getAll()});
+ Active.insert(VRegMaskPair(D, TRI, MRI));
}
}
continue;
@@ -341,7 +349,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
RegisterSet Defs;
for (auto D : I->defs()) {
if (D.getReg().isVirtual() && takeReg(D.getReg()))
- Defs.insert(VRegMaskPair(D, *TRI));
+ Defs.insert(VRegMaskPair(D, TRI, MRI));
}
if (Reloads.empty() && Defs.empty()) {
@@ -413,7 +421,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
assert(B.isMBB());
MachineBasicBlock *ValueSrc = B.getMBB();
if (ValueSrc->getNumber() == MBB.getNumber()) {
- VRegMaskPair VMP(U, *TRI);
+ VRegMaskPair VMP(U, TRI, MRI);
if (!isCoveredActive(VMP, Active)) {
reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
}
@@ -447,7 +455,7 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
for (auto &PU : PHI.uses()) {
if (PU.isReg()) {
if (takeReg(PU.getReg())) {
- VRegMaskPair P(PU, *TRI);
+ VRegMaskPair P(PU, TRI, MRI);
PHIOps.insert(P);
}
}
@@ -471,13 +479,35 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
for (auto Pred : Preds) {
- //dumpRegSet(getBlockInfo(*Pred).SpillSet);
+ dumpRegSet(getBlockInfo(*Pred).SpillSet);
Entry.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
- //dumpRegSet(Entry.SpillSet);
+ dumpRegSet(Entry.SpillSet);
}
- set_intersect(Entry.SpillSet, Entry.ActiveSet);
+ // The line below was added according to the algorithm proposed in Braun and
+ // Hack. It is commented out because of the following observation:
+ // If some register is spilled in a block, it is not in its active set
+ // anymore. If this block has only one successor, then the successor's active
+ // set is equal to the block's active set. Then the line below removes the
+ // spilled register from its spill set and will not propagate it to the
+ // successors along the CFG. If later on we have a join block with multiple
+ // predecessors, then the spilled register will not be spilled along the path
+ // to that join block from the common dominator.
+ //  BB0          [x active]
+ //  / \
+ // BB1  \        [x spilled]
+ //  |    |
+ // BB2   |       [x is not in BB1 Active set =>
+ //  \    |        it is not in BB2 Active set =>
+ //   \   |        BB2.Spilled ^ BB2.Active yields an empty set]
+ //    \ /
+ //    BB3        [x is not in BB2 Spilled set => it will not be spilled along
+ //                the BB0 -> BB3 edge. If we have a use of x in BB3, the
+ //                reload will fail if control flow reached BB3 along the
+ //                BB0 -> BB3 edge]
+
+ // set_intersect(Entry.SpillSet, Entry.ActiveSet);
+
for (auto Pred : Preds) {
- auto PE = getBlockInfo(*Pred);
+ auto &PE = getBlockInfo(*Pred);
LLVM_DEBUG(dbgs() << "\nCurr block [ MBB_" << MBB.getNumber() << "."
<< MBB.getName() << " ] Active Set:\n";
dumpRegSet(Entry.ActiveSet);
@@ -506,12 +536,17 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
}
+ LLVM_DEBUG(dbgs() << "\nPred [ MBB_" << Pred->getNumber() << "."
+ << Pred->getName() << " ] SpillSet:\n";
+ dumpRegSet(PE.SpillSet));
for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
PE.ActiveSet)) {
spillAtEnd(*Pred, S);
// FIXME: Do we need to update sets?
PE.SpillSet.insert(S);
+ PE.ActiveSet.remove(S);
Entry.SpillSet.insert(S);
+ Entry.ActiveSet.remove(S);
}
}
}
@@ -564,7 +599,21 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
continue;
if (takeReg(VReg) && LIS.isLiveInToMBB(LIS.getInterval(VReg), &MBB)) {
- LiveIn.insert({VReg, LaneBitmask::getAll()});
+ // We have to take care of the subreg index and set LaneMask accordingly.
+ // LaneBitmask LaneMask = LaneBitmask::getAll();
+ // RegisterSet Preds;
+ // for (auto Pred : MBB.predecessors()) {
+ // auto PredActive = getBlockInfo(*Pred).ActiveSet;
+ // set_intersect()
+ // for (auto P : PredActive) {
+ // if (P.VReg == VReg) {
+ // LaneMask = P.LaneMask;
+ // break;
+ // }
+ // }
+ // }
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ LiveIn.insert(VRegMaskPair(VReg, getFullMaskForRC(*RC, TRI)));
}
}
@@ -625,17 +674,23 @@ const TargetRegisterClass *
AMDGPUSSASpiller::getRegClassForVregMaskPair(VRegMaskPair VMP,
unsigned &SubRegIdx) {
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
-
- if (!VMP.LaneMask.all()) {
- SmallVector<unsigned> Idxs;
- if (TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs)) {
- SubRegIdx = Idxs[0];
- for (unsigned i = 1; i < Idxs.size() - 1; i++)
- SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
- RC = TRI->getSubRegisterClass(RC, SubRegIdx);
- }
+ LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
+ if (VMP.LaneMask != Mask) {
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.LaneMask, TRI);
+ RC = TRI->getSubRegisterClass(RC, SubRegIdx);
}
+ // if (!VMP.LaneMask.all()) {
+ // SmallVector<unsigned> Idxs;
+ // if (TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs)) {
+ // SubRegIdx = Idxs[0];
+ // // FIXME: Idxs.size() - 1 ?
+ // for (unsigned i = 1; i < Idxs.size() - 1; i++)
+ // SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
+ // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ // }
+ // }
+
return RC;
}
@@ -666,40 +721,39 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
// then this, reloaded here.
SmallVector<MachineOperand*> ToUpdate;
for (auto &U : MRI->use_nodbg_operands(VMP.VReg)) {
- if (SpillPoints.contains(VMP)) {
- MachineInstr *UseMI = U.getParent();
- MachineInstr *Spill = SpillPoints[VMP];
- VRegMaskPair UseVMP(U, *TRI);
- if (UseMI != Spill && MDT.dominates(Spill, UseMI) && UseVMP == VMP)
- ToUpdate.push_back(&U);
- } else {
- llvm::report_fatal_error(
- "We're going to reload VReg which has not been spilled!");
- }
- }
- for (auto U : ToUpdate) {
- // FIXME: Do we always want "AtEndOfBlock"?
- U->setSubReg(AMDGPU::NoRegister);
- U->setReg(Updater.GetValueAtEndOfBlock(&MBB));
+ // if (SpillPoints.contains(VMP)) {
+ // MachineInstr *UseMI = U.getParent();
+
+ // // FIXME: If we have 2 spills ? Which one dominates current Use?
+ // // SpillPoints should be the map [VMP => [Vector[MIs]]]
+ // // Proper fix: MachineSSAUpdater should take care of this!!!
+ // MachineInstr *Spill = SpillPoints[VMP];
+ // VRegMaskPair UseVMP(U, TRI, MRI);
+ // if (UseMI != Spill && MDT.dominates(Spill, UseMI) && UseVMP == VMP)
+ // ToUpdate.push_back(&U);
+ // } else {
+ // llvm::report_fatal_error(
+ // "We're going to reload VReg which has not been spilled!");
+ // }
+ MachineInstr *UseMI = U.getParent();
+ if (MDT.dominates(&ReloadMI, UseMI))
+ Updater.RewriteUse(U);
}
+ // for (auto U : ToUpdate) {
+ // // FIXME: Do we always want "AtEndOfBlock"?
+ // U->setSubReg(AMDGPU::NoRegister);
+ // U->setReg(Updater.GetValueAtEndOfBlock(&MBB));
+ // }
LIS.createAndComputeVirtRegInterval(NewVReg);
auto &Entry = getBlockInfo(MBB);
- Entry.ActiveSet.insert({NewVReg, LaneBitmask::getAll()});
+ Entry.ActiveSet.insert({NewVReg, getFullMaskForRC(*RC, TRI)});
}
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- unsigned SubRegIdx = 0;
- SmallVector<unsigned> Idxs;
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
- bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs);
- if (HasSubReg) {
- SubRegIdx = Idxs[0];
- for (int i = 1; i < Idxs.size() - 1; i++)
- SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
- RC = TRI->getSubRegisterClass(RC, SubRegIdx);
- }
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.LaneMask, TRI);
+ const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
int FI = assignVirt2StackSlot(VMP);
TII->storeRegToStackSlot(MBB, InsertBefore, VMP.VReg, true, FI, RC, TRI,
@@ -755,7 +809,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
Active.remove_if([&](VRegMaskPair P) { return NU.isDead(MBB, I, P); });
- LLVM_DEBUG(dbgs() << "\nActive set after DEAD VRegs removed:\n";
+ LLVM_DEBUG(dbgs() << "\n\"limit\": Active set after DEAD VRegs removed:\n";
dumpRegSet(Active));
unsigned CurRP = getSizeInRegs(Active);
@@ -779,17 +833,18 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
SmallVector<VRegMaskPair> Sorted = NU.getSortedSubregUses(I, P);
- for (auto P : Sorted) {
- unsigned Size = getSizeInRegs(P);
+ for (auto S : Sorted) {
+ unsigned Size = getSizeInRegs(S);
CurRP -= Size;
- if (!Spilled.contains(P))
- ToSpill.insert(P);
- ActiveMask &= (~P.LaneMask);
+ if (!Spilled.contains(S))
+ ToSpill.insert(S);
+ ActiveMask &= (~S.LaneMask);
if (CurRP == Limit)
break;
}
if (ActiveMask.any()) {
+ // Insert the remaining part of P into the Active set.
VRegMaskPair Q(P.VReg, ActiveMask);
// printVRegMaskPair(Q);
Active.insert(Q);
@@ -809,6 +864,22 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
NumSpills++;
Spilled.insert(R);
}
+
+ if (!ToSpill.empty()) {
+ dbgs() << "\nActive set after spilling:\n";
+ dumpRegSet(Active);
+ dbgs() << "\nSpilled set after spilling:\n";
+ dumpRegSet(Spilled);
+ }
+
+ LLVM_DEBUG(
+ if (!ToSpill.empty()) {
+ dbgs() << "\nActive set after spilling:\n";
+ dumpRegSet(Active);
+ dbgs() << "\nSpilled set after spilling:\n";
+ dumpRegSet(Spilled);
+ }
+);
// T2->stopTimer();
return NumSpills;
}
>From cdc69859b0c54c2730bb185dd1ec9a880e8128df Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 12 Jun 2025 22:57:50 +0200
Subject: [PATCH 26/46] SSA Spiller. reloadBefore reworked.
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 128 +++++++++++---------
1 file changed, 73 insertions(+), 55 deletions(-)
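The rework below splits use rewriting out of reloadBefore into a separate rewriteUses helper built on MachineSSAUpdater: uses inside the reload block are rewritten in place, while cross-block uses go through the updater, which inserts PHIs at join points as needed. A rough sketch of that pattern, assuming OldVReg was spilled and NewVReg is defined by the reload in ReloadMBB (simplified relative to the actual code):

    MachineSSAUpdater Updater(MF);
    Updater.Initialize(OldVReg);                  // register being replaced
    Updater.AddAvailableValue(ReloadMBB, NewVReg);
    for (MachineOperand &U : MRI.use_operands(OldVReg)) {
      if (U.getParent()->getParent() == ReloadMBB)
        U.setReg(NewVReg);                        // local use: rewrite directly
      else
        Updater.RewriteUse(U);                    // cross-block: may insert PHIs
    }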
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 3c26a842804a1..7885e0bbf4882 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -140,13 +140,15 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void initActiveSetUsualBlock(MachineBasicBlock &MBB);
void initActiveSetLoopHeader(MachineBasicBlock &MBB);
- void reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
+ Register reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
void spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
- void reloadBefore(MachineBasicBlock &MBB,
+ Register reloadBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
void spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
+ void rewriteUses(MachineBasicBlock &MBB, Register OldVReg, Register NewVReg);
+
unsigned getLoopMaxRP(MachineLoop *L);
// Returns number of spilled VRegs
unsigned limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
@@ -381,7 +383,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (auto R : Reloads) {
LLVM_DEBUG(dbgs() << "\nReloading "; printVRegMaskPair(R);
dbgs() << "\n");
- reloadBefore(MBB, I, R);
+ Register NewVReg = reloadBefore(MBB, I, R);
+ rewriteUses(MBB, R.VReg, NewVReg);
}
std::advance(I, NSpills);
@@ -423,7 +426,29 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (ValueSrc->getNumber() == MBB.getNumber()) {
VRegMaskPair VMP(U, TRI, MRI);
if (!isCoveredActive(VMP, Active)) {
- reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
+ Register NewVReg = reloadAtEnd(MBB, VMP);
+ U.setReg(NewVReg);
+ U.setSubReg(AMDGPU::NoRegister);
+
+ // The code below is commented out because of a BUG in
+ // MachineSSAUpdater: when the register class of the PHI-defined
+ // register is a superclass of NewReg's class, it inserts a COPY
+ // AFTER the PHI:
+
+ // Predecessor:
+ // %157:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0
+
+ // %146:vreg_64 = PHI %70:vreg_64.sub0, %bb.3, %144:vgpr_32, %bb.1
+
+ // becomes:
+
+ // %146:vreg_64 = PHI %158:vreg_64.sub0, %bb.3, %144:vgpr_32,
+ // %bb.1 %158:vreg_64 = COPY %157
+
+ // MachineSSAUpdater SSAUpdater(*MBB.getParent());
+ // SSAUpdater.Initialize(U.getReg());
+ // SSAUpdater.AddAvailableValue(&MBB, NewVReg);
+ // SSAUpdater.RewriteUse(U);
}
}
}
@@ -523,16 +548,18 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
set_intersect(ReloadInPred, PE.SpillSet);
dumpRegSet(ReloadInPred);
if (!ReloadInPred.empty()) {
- // We're about to insert N reloads at the end of the predecessor block.
- // Make sure we have enough registers for N definitions or spill to make
- // room for them.
- limit(*Pred, PE.ActiveSet, PE.SpillSet, Pred->getFirstTerminator(),
- NumAvailableRegs - getSizeInRegs(ReloadInPred));
+
+ // Since we operate on SSA, any register that is live across the edge must
+ // either be defined before or within the IDom, or be a PHI operand. If a
+ // register is neither a PHI operand nor live-out from all predecessors,
+ // it must have been spilled in one of them. Registers that are defined
+ // and used entirely within a predecessor are dead at its exit. Therefore,
+ // there is always room to reload a register that is not live across the
+ // edge.
for (auto R : ReloadInPred) {
- reloadAtEnd(*Pred, R);
- // FIXME: Do we need to update sets?
- PE.ActiveSet.insert(R);
+ Register NewVReg = reloadAtEnd(*Pred, R);
+ rewriteUses(*Pred, R.VReg, NewVReg);
}
}
@@ -542,7 +569,6 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
PE.ActiveSet)) {
spillAtEnd(*Pred, S);
- // FIXME: Do we need to update sets?
PE.SpillSet.insert(S);
PE.ActiveSet.remove(S);
Entry.SpillSet.insert(S);
@@ -694,15 +720,15 @@ AMDGPUSSASpiller::getRegClassForVregMaskPair(VRegMaskPair VMP,
return RC;
}
-void AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
- reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
+Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
+ return reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
}
void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
spillBefore(MBB, MBB.getFirstTerminator(), VMP);
}
-void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
+Register AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
unsigned SubRegIdx = 0;
@@ -713,40 +739,11 @@ void AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
// FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
MachineInstr &ReloadMI = *(--InsertBefore);
LIS.InsertMachineInstrInMaps(ReloadMI);
- MachineSSAUpdater Updater(*MBB.getParent());
- Updater.Initialize(NewVReg);
- Updater.AddAvailableValue(ReloadMI.getParent(), NewVReg);
- // FIXME: we'd better pass the exact UseMI here to avoid scanning all the
- // users. isCoveredActive takes care of possible uses with the mask narrower
- // then this, reloaded here.
- SmallVector<MachineOperand*> ToUpdate;
- for (auto &U : MRI->use_nodbg_operands(VMP.VReg)) {
- // if (SpillPoints.contains(VMP)) {
- // MachineInstr *UseMI = U.getParent();
-
- // // FIXME: If we have 2 spills ? Which one dominates current Use?
- // // SpillPoints should be the map [VMP => [Vector[MIs]]]
- // // Proper fix: MachineSSAUpdater should take care of this!!!
- // MachineInstr *Spill = SpillPoints[VMP];
- // VRegMaskPair UseVMP(U, TRI, MRI);
- // if (UseMI != Spill && MDT.dominates(Spill, UseMI) && UseVMP == VMP)
- // ToUpdate.push_back(&U);
- // } else {
- // llvm::report_fatal_error(
- // "We're going to reload VReg which has not been spilled!");
- // }
- MachineInstr *UseMI = U.getParent();
- if (MDT.dominates(&ReloadMI, UseMI))
- Updater.RewriteUse(U);
- }
- // for (auto U : ToUpdate) {
- // // FIXME: Do we always want "AtEndOfBlock"?
- // U->setSubReg(AMDGPU::NoRegister);
- // U->setReg(Updater.GetValueAtEndOfBlock(&MBB));
- // }
+
LIS.createAndComputeVirtRegInterval(NewVReg);
auto &Entry = getBlockInfo(MBB);
Entry.ActiveSet.insert({NewVReg, getFullMaskForRC(*RC, TRI)});
+ return NewVReg;
}
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
@@ -780,6 +777,29 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
SpillPoints[VMP] = &Spill;
}
+void AMDGPUSSASpiller::rewriteUses(MachineBasicBlock &MBB, Register OldVReg,
+ Register NewVReg) {
+ MachineSSAUpdater SSAUpdater(*MBB.getParent());
+ SSAUpdater.Initialize(OldVReg);
+ SSAUpdater.AddAvailableValue(&MBB, NewVReg);
+ for (MachineOperand &UseOp : MRI->use_operands(OldVReg)) {
+ MachineInstr *UseMI = UseOp.getParent();
+ MachineBasicBlock *UseMBB = UseMI->getParent();
+
+ if (UseMBB->getNumber() == MBB.getNumber()) {
+ UseOp.setReg(NewVReg);
+ UseOp.setSubReg(AMDGPU::NoRegister);
+ } else {
+ // We skip rewriting if SSAUpdater already has a dominating def for
+ // this block
+ if (SSAUpdater.HasValueForBlock(UseMBB))
+ continue;
+ // This rewrites the use to a PHI result or correct value
+ SSAUpdater.RewriteUse(UseOp);
+ }
+ }
+}
+
unsigned AMDGPUSSASpiller::getLoopMaxRP(MachineLoop *L) {
unsigned MaxRP = 0;
for (auto MBB : L->getBlocks()) {
@@ -872,14 +892,12 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
dumpRegSet(Spilled);
}
- LLVM_DEBUG(
- if (!ToSpill.empty()) {
- dbgs() << "\nActive set after spilling:\n";
- dumpRegSet(Active);
- dbgs() << "\nSpilled set after spilling:\n";
- dumpRegSet(Spilled);
- }
-);
+ LLVM_DEBUG(if (!ToSpill.empty()) {
+ dbgs() << "\nActive set after spilling:\n";
+ dumpRegSet(Active);
+ dbgs() << "\nSpilled set after spilling:\n";
+ dumpRegSet(Spilled);
+ });
// T2->stopTimer();
return NumSpills;
}
>From 5da83fc17195093191ef368570f094d7635918f7 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Fri, 13 Jun 2025 18:57:24 +0200
Subject: [PATCH 27/46] SSA Spiller. MachineSSAUpdater fix.
---
llvm/include/llvm/CodeGen/MachineSSAUpdater.h | 1 +
llvm/lib/CodeGen/MachineSSAUpdater.cpp | 26 +++++++++----------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 12 ++++-----
3 files changed, 19 insertions(+), 20 deletions(-)
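The MachineSSAUpdater change below replaces the unconditional COPY insertion on a register-class mismatch with sub-register-aware rewriting: a use keeps its sub-register index unless the new value's class already matches the corresponding sub-register class, in which case the index is dropped. In rough outline (condensed from the hunk below; UseRC is the class required by the use):

    unsigned SubRegIndex = U.getSubReg();
    if (NewVR && SubRegIndex) {
      const TargetRegisterClass *SubRC =
          TRI->getSubRegisterClass(UseRC, SubRegIndex);
      if (MRI->getRegClass(NewVR) == SubRC)
        SubRegIndex = 0;            // NewVR is already sub-register sized
    }
    U.setReg(NewVR);
    U.setSubReg(SubRegIndex);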
diff --git a/llvm/include/llvm/CodeGen/MachineSSAUpdater.h b/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
index 3305e90f696d4..1c58aa9543588 100644
--- a/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
+++ b/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -50,6 +50,7 @@ class MachineSSAUpdater {
const TargetInstrInfo *TII = nullptr;
MachineRegisterInfo *MRI = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
public:
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
diff --git a/llvm/lib/CodeGen/MachineSSAUpdater.cpp b/llvm/lib/CodeGen/MachineSSAUpdater.cpp
index f0a136751bbff..3ed9f705c9ca5 100644
--- a/llvm/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/llvm/lib/CodeGen/MachineSSAUpdater.cpp
@@ -41,9 +41,9 @@ static AvailableValsTy &getAvailableVals(void *AV) {
}
MachineSSAUpdater::MachineSSAUpdater(MachineFunction &MF,
- SmallVectorImpl<MachineInstr*> *NewPHI)
- : InsertedPHIs(NewPHI), TII(MF.getSubtarget().getInstrInfo()),
- MRI(&MF.getRegInfo()) {}
+ SmallVectorImpl<MachineInstr *> *NewPHI)
+ : InsertedPHIs(NewPHI), TII(MF.getSubtarget().getInstrInfo()),
+ MRI(&MF.getRegInfo()), TRI(MF.getSubtarget().getRegisterInfo()) {}
MachineSSAUpdater::~MachineSSAUpdater() {
delete static_cast<AvailableValsTy*>(AV);
@@ -236,23 +236,21 @@ void MachineSSAUpdater::RewriteUse(MachineOperand &U) {
NewVR = GetValueInMiddleOfBlock(UseMI->getParent());
}
- // Insert a COPY if needed to satisfy register class constraints for the using
- // MO. Or, if possible, just constrain the class for NewVR to avoid the need
- // for a COPY.
+ unsigned SubRegIndex = U.getSubReg();
+
if (NewVR) {
const TargetRegisterClass *UseRC =
dyn_cast_or_null<const TargetRegisterClass *>(RegAttrs.RCOrRB);
- if (UseRC && !MRI->constrainRegClass(NewVR, UseRC)) {
- MachineBasicBlock *UseBB = UseMI->getParent();
- MachineInstr *InsertedCopy =
- InsertNewDef(TargetOpcode::COPY, UseBB, UseBB->getFirstNonPHI(),
- RegAttrs, MRI, TII)
- .addReg(NewVR);
- NewVR = InsertedCopy->getOperand(0).getReg();
- LLVM_DEBUG(dbgs() << " Inserted COPY: " << *InsertedCopy);
+ const TargetRegisterClass *NewRC = MRI->getRegClass(NewVR);
+ if (SubRegIndex) {
+ const TargetRegisterClass *SubRC =
+ TRI->getSubRegisterClass(UseRC, SubRegIndex);
+ if (NewRC == SubRC)
+ SubRegIndex = 0;
}
}
U.setReg(NewVR);
+ U.setSubReg(SubRegIndex);
}
namespace llvm {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 7885e0bbf4882..2155ba3a91355 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -427,8 +427,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
VRegMaskPair VMP(U, TRI, MRI);
if (!isCoveredActive(VMP, Active)) {
Register NewVReg = reloadAtEnd(MBB, VMP);
- U.setReg(NewVReg);
- U.setSubReg(AMDGPU::NoRegister);
+ // U.setReg(NewVReg);
+ // U.setSubReg(AMDGPU::NoRegister);
// The code below is commented out because of a BUG in
// MachineSSAUpdater: when the register class of the PHI-defined
@@ -445,10 +445,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
// %146:vreg_64 = PHI %158:vreg_64.sub0, %bb.3, %144:vgpr_32,
// %bb.1 %158:vreg_64 = COPY %157
- // MachineSSAUpdater SSAUpdater(*MBB.getParent());
- // SSAUpdater.Initialize(U.getReg());
- // SSAUpdater.AddAvailableValue(&MBB, NewVReg);
- // SSAUpdater.RewriteUse(U);
+ MachineSSAUpdater SSAUpdater(*MBB.getParent());
+ SSAUpdater.Initialize(U.getReg());
+ SSAUpdater.AddAvailableValue(&MBB, NewVReg);
+ SSAUpdater.RewriteUse(U);
}
}
}
>From b576c61586625444190441ce7afd987735cc3351 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Tue, 17 Jun 2025 15:26:28 +0000
Subject: [PATCH 28/46] SSA Spiller. WIP.
Unused MachineDominatorTree removed.
Renamed list added to avoid replacing the register operands
which have not been renamed. Minor bugfixes.
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 2 +-
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 2 +-
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 10 +++++---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 25 +++++--------------
4 files changed, 15 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index c2a188eb79fc7..ad52279320bb0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -209,7 +209,7 @@ NextUseResult::getSortedSubregUses(const MachineBasicBlock::iterator I,
}
}
}
- return std::move(Result);
+ return Result;
}
void NextUseResult::dumpUsedInBlock() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 7ff76b52c2d49..3587cd9b4e091 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -120,7 +120,7 @@ class NextUseResult {
SmallVector<unsigned> Keys;
for (auto P : NextUseMap)
Keys.push_back(P.first);
- return std::move(Keys);
+ return Keys;
}
bool contains(unsigned Key) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 971b411f33b75..98cca12c3c956 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -43,6 +43,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
DenseMap<MachineInstr *, unsigned> PHIMap;
DenseMap<unsigned, VRegDefStack> VregNames;
DenseSet<unsigned> DefSeen;
+ DenseSet<unsigned> Renamed;
void collectCrossBlockVRegs(MachineFunction &MF);
void findPHINodesPlacement(const SmallPtrSetImpl<MachineBasicBlock *> &LiveInBlocks,
@@ -68,10 +69,11 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
- // TODO: Need to support the RENAMED set to avoid replacing the registers
+ // We use the Renamed set to avoid replacing the registers
// which were not renamed in uses!
for (auto &Op : I.uses()) {
- if (Op.isReg() && Op.getReg().isVirtual()) {
+ if (Op.isReg() && Op.getReg().isVirtual() &&
+ Renamed.contains(Op.getReg())) {
unsigned VReg = Op.getReg();
assert(!VregNames[VReg].empty() &&
"Error: use does not dominated by definition!\n");
@@ -159,6 +161,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
LLVM_DEBUG(dbgs()
<< "Renaming VReg: " << Register::virtReg2Index(VReg)
<< " to " << Register::virtReg2Index(NewVReg) << "\n");
+ Renamed.insert(VReg);
} else {
VregNames[VReg].push_back(
{VReg, getOperandLaneMask(Op, TRI, MRI), Op.getSubReg(), &I});
@@ -255,7 +258,8 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
PHINodes.clear();
VregNames.clear();
DefSeen.clear();
-
+ Renamed.clear();
+
// Collect all cross-block virtual registers.
// This includes registers that are live-in to the function, and registers
// that are defined in multiple blocks.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 2155ba3a91355..7fcf25eac0c99 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -28,7 +28,6 @@ namespace {
class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
LiveIntervals &LIS;
MachineLoopInfo &LI;
- MachineDominatorTree &MDT;
AMDGPUNextUseAnalysis::Result &NU;
MachineRegisterInfo *MRI;
const SIRegisterInfo *TRI;
@@ -36,8 +35,6 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
const GCNSubtarget *ST;
MachineFrameInfo *MFI;
- static constexpr int NO_STACK_SLOT = INT_MAX;
-
unsigned NumSpillSlots;
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
@@ -195,13 +192,9 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
bool isCoveredActive(VRegMaskPair VMP, const RegisterSet Active);
public:
- AMDGPUSSASpiller() = default;
-
AMDGPUSSASpiller(LiveIntervals &LIS, MachineLoopInfo &LI,
- MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
- : LIS(LIS), LI(LI), MDT(MDT), NU(NU), NumSpillSlots(0) //,
- // Virt2StackSlotMap(NO_STACK_SLOT) {
- {
+ AMDGPUNextUseAnalysis::Result &NU)
+ : LIS(LIS), LI(LI), NU(NU), NumSpillSlots(0) {
TG = new TimerGroup("SSA SPiller Timing",
"Time Spent in different parts of the SSA Spiller");
T1 = new Timer("General time", "ProcessFunction", *TG);
@@ -215,8 +208,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
delete T2;
delete T3;
delete T4;
- //delete TG;
- }
+ // delete TG;
+ }
bool run(MachineFunction &MF);
};
@@ -968,9 +961,8 @@ llvm::AMDGPUSSASpillerPass::run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM) {
LiveIntervals &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
MachineLoopInfo &LI = MFAM.getResult<MachineLoopAnalysis>(MF);
- MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
AMDGPUNextUseAnalysis::Result &NU = MFAM.getResult<AMDGPUNextUseAnalysis>(MF);
- AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
+ AMDGPUSSASpiller Impl(LIS, LI, NU);
bool Changed = Impl.run(MF);
if (!Changed)
return PreservedAnalyses::all();
@@ -996,8 +988,6 @@ class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
AU.setPreservesCFG();
AU.addRequiredTransitiveID(MachineLoopInfoID);
AU.addPreservedID(MachineLoopInfoID);
- AU.addRequiredTransitiveID(MachineDominatorsID);
- AU.addPreservedID(MachineDominatorsID);
AU.addRequired<LiveIntervalsWrapperPass>();
AU.addRequired<AMDGPUNextUseAnalysisWrapper>();
MachineFunctionPass::getAnalysisUsage(AU);
@@ -1007,17 +997,14 @@ class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
bool AMDGPUSSASpillerLegacy::runOnMachineFunction(MachineFunction &MF) {
LiveIntervals &LIS = getAnalysis<LiveIntervalsWrapperPass>().getLIS();
MachineLoopInfo &LI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
- MachineDominatorTree &MDT =
- getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
AMDGPUNextUseAnalysis::Result &NU =
getAnalysis<AMDGPUNextUseAnalysisWrapper>().getNU();
- AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
+ AMDGPUSSASpiller Impl(LIS, LI, NU);
return Impl.run(MF);
}
INITIALIZE_PASS_BEGIN(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
false, false)
-INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AMDGPUNextUseAnalysisWrapper)
>From 3921dce990af6c5a7bf662d8ca647dc579115e53 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 19 Jun 2025 22:09:08 +0200
Subject: [PATCH 29/46] SSA RA: Code refactoring: VRegMaskPair moved to common
header file. + RebuildSSA - rename function reworked
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 44 ++++--
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 80 +++--------
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 129 ++++++++++-------
llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h | 2 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 133 +++++++-----------
llvm/lib/Target/AMDGPU/VRegMaskPair.h | 103 ++++++++++++++
6 files changed, 278 insertions(+), 213 deletions(-)
create mode 100644 llvm/lib/Target/AMDGPU/VRegMaskPair.h
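VRegMaskPair now lives in its own header and is used only through accessors; the call sites below rely on getVReg() and getLaneMask() plus the constructors that already existed. A minimal sketch of the interface those call sites assume (the actual VRegMaskPair.h is not shown in this excerpt and may carry more):

    class VRegMaskPair {
      Register VReg;
      LaneBitmask LaneMask;
    public:
      VRegMaskPair(Register VReg, LaneBitmask LaneMask)
          : VReg(VReg), LaneMask(LaneMask) {}
      Register getVReg() const { return VReg; }
      LaneBitmask getLaneMask() const { return LaneMask; }
      bool operator==(const VRegMaskPair &Other) const {
        return VReg == Other.VReg && LaneMask == Other.LaneMask;
      }
    };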
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index ad52279320bb0..07c6fc082141b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -194,17 +194,17 @@ NextUseResult::getSortedSubregUses(const MachineBasicBlock::iterator I,
unsigned MBBNum = MBB->getNumber();
if (NextUseMap.contains(MBBNum) &&
NextUseMap[MBBNum].InstrDist.contains(&*I)) {
- VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
- if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
+ // VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.getVReg())) {
VRegDistances::SortedRecords Dists =
- NextUseMap[MBBNum].InstrDist[&*I][VMP.VReg];
- LLVM_DEBUG(dbgs() << "Mask : [" << PrintLaneMask(VMP.LaneMask) << "]\n");
+ NextUseMap[MBBNum].InstrDist[&*I][VMP.getVReg()];
+ LLVM_DEBUG(dbgs() << "Mask : [" << PrintLaneMask(VMP.getLaneMask()) << "]\n");
for (auto P : reverse(Dists)) {
LaneBitmask UseMask = P.first;
LLVM_DEBUG(dbgs() << "Used mask : [" << PrintLaneMask(UseMask)
<< "]\n");
- if ((UseMask & VMP.LaneMask) == UseMask) {
- Result.push_back({VMP.VReg, UseMask});
+ if ((UseMask & VMP.getLaneMask()) == UseMask) {
+ Result.push_back({VMP.getVReg(), UseMask});
}
}
}
@@ -212,13 +212,33 @@ NextUseResult::getSortedSubregUses(const MachineBasicBlock::iterator I,
return Result;
}
+SmallVector<VRegMaskPair>
+NextUseResult::getSortedSubregUses(const MachineBasicBlock &MBB,
+ const VRegMaskPair VMP) {
+ SmallVector<VRegMaskPair> Result;
+ unsigned MBBNum = MBB.getNumber();
+ if (NextUseMap.contains(MBBNum) &&
+ NextUseMap[MBBNum].Bottom.contains(VMP.getVReg())) {
+ VRegDistances::SortedRecords Dists = NextUseMap[MBBNum].Bottom[VMP.getVReg()];
+ LLVM_DEBUG(dbgs() << "Mask : [" << PrintLaneMask(VMP.getLaneMask()) << "]\n");
+ for (auto P : reverse(Dists)) {
+ LaneBitmask UseMask = P.first;
+ LLVM_DEBUG(dbgs() << "Used mask : [" << PrintLaneMask(UseMask) << "]\n");
+ if ((UseMask & VMP.getLaneMask()) == UseMask) {
+ Result.push_back({VMP.getVReg(), UseMask});
+ }
+ }
+ }
+ return Result;
+}
+
void NextUseResult::dumpUsedInBlock() {
LLVM_DEBUG(for (auto P
: UsedInBlock) {
dbgs() << "MBB_" << P.first << ":\n";
for (auto VMP : P.second) {
- dbgs() << "[ " << printReg(VMP.VReg) << " : <"
- << PrintLaneMask(VMP.LaneMask) << "> ]\n";
+ dbgs() << "[ " << printReg(VMP.getVReg()) << " : <"
+ << PrintLaneMask(VMP.getLaneMask()) << "> ]\n";
}
});
}
@@ -231,9 +251,9 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock::iterator I,
if (NextUseMap.contains(MBBNum) &&
NextUseMap[MBBNum].InstrDist.contains(&*I)) {
VRegDistances Dists = NextUseMap[MBBNum].InstrDist[&*I];
- if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.VReg)) {
+ if (NextUseMap[MBBNum].InstrDist[&*I].contains(VMP.getVReg())) {
// printSortedRecords(Dists[VMP.VReg], VMP.VReg);
- getFromSortedRecords(Dists[VMP.VReg], VMP.LaneMask, Dist);
+ getFromSortedRecords(Dists[VMP.getVReg()], VMP.getLaneMask(), Dist);
}
}
@@ -245,8 +265,8 @@ unsigned NextUseResult::getNextUseDistance(const MachineBasicBlock &MBB,
unsigned Dist = Infinity;
unsigned MBBNum = MBB.getNumber();
if (NextUseMap.contains(MBBNum)) {
- if (NextUseMap[MBBNum].Bottom.contains(VMP.VReg)) {
- getFromSortedRecords(NextUseMap[MBBNum].Bottom[VMP.VReg], VMP.LaneMask,
+ if (NextUseMap[MBBNum].Bottom.contains(VMP.getVReg())) {
+ getFromSortedRecords(NextUseMap[MBBNum].Bottom[VMP.getVReg()], VMP.getLaneMask(),
Dist);
}
}
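
The distance queries above reduce to scanning a vreg's (lane mask, distance) records for the queried mask. Below is a minimal standalone sketch of that lookup in plain C++; Record, LaneMask and nextUseDistance are illustrative stand-ins, not the in-tree SortedRecords/getFromSortedRecords types, and the overlap rule is an assumption about their semantics.

  #include <algorithm>
  #include <cstdint>
  #include <limits>
  #include <vector>

  using LaneMask = uint64_t;
  struct Record { LaneMask Mask; unsigned Dist; };

  // Smallest recorded distance among records whose lanes overlap the queried
  // mask; "infinity" when no record covers any of the queried lanes.
  unsigned nextUseDistance(const std::vector<Record> &Records, LaneMask Query) {
    unsigned Dist = std::numeric_limits<unsigned>::max();
    for (const Record &R : Records)
      if (R.Mask & Query)
        Dist = std::min(Dist, R.Dist);
    return Dist;
  }
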
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 3587cd9b4e091..63cab36e1c3ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -17,6 +17,7 @@
#include "SIRegisterInfo.h"
#include "GCNSubtarget.h"
#include "AMDGPUSSARAUtils.h"
+#include "VRegMaskPair.h"
#include <algorithm>
#include <limits>
@@ -26,55 +27,6 @@ using namespace llvm;
// namespace {
-struct VRegMaskPair {
-public:
- Register VReg;
- LaneBitmask LaneMask;
-
- VRegMaskPair(Register VReg, LaneBitmask LaneMask)
- : VReg(VReg), LaneMask(LaneMask) {}
-
- VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI) {
- assert(MO.isReg() && "Not a register operand!");
- Register R = MO.getReg();
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, R);
- assert(R.isVirtual() && "Not a virtual register!");
- VReg = R;
- LaneMask = getFullMaskForRC(*RC, TRI);
- unsigned subRegIndex = MO.getSubReg();
- if (subRegIndex) {
- LaneMask = TRI->getSubRegIndexLaneMask(subRegIndex);
- }
- }
-
- bool operator==(const VRegMaskPair &other) const {
- return VReg == other.VReg && LaneMask == other.LaneMask;
- }
-};
-
-template<>
-struct DenseMapInfo<VRegMaskPair> {
- static inline VRegMaskPair getEmptyKey() {
- return {Register(DenseMapInfo<unsigned>::getEmptyKey()),
- LaneBitmask(0xFFFFFFFFFFFFFFFFULL)};
- }
-
- static inline VRegMaskPair getTombstoneKey() {
- return { Register(DenseMapInfo<unsigned>::getTombstoneKey()),
- LaneBitmask(0xFFFFFFFFFFFFFFFEULL) };
- }
-
- static unsigned getHashValue(const VRegMaskPair &P) {
- return DenseMapInfo<unsigned>::getHashValue(P.VReg.id()) ^
- DenseMapInfo<uint64_t>::getHashValue(P.LaneMask.getAsInteger());
- }
-
- static bool isEqual(const VRegMaskPair &LHS, const VRegMaskPair &RHS) {
- return DenseMapInfo<unsigned>::isEqual(LHS.VReg.id(), RHS.VReg.id()) &&
- DenseMapInfo<uint64_t>::isEqual(LHS.LaneMask.getAsInteger(),
- RHS.LaneMask.getAsInteger());
- }
-};
class NextUseResult {
friend class AMDGPUNextUseAnalysisWrapper;
@@ -128,9 +80,9 @@ class NextUseResult {
}
bool insert(VRegMaskPair VMP, unsigned Dist) {
- Record R(VMP.LaneMask, Dist);
- if (NextUseMap.contains(VMP.VReg)) {
- SortedRecords &Dists = NextUseMap[VMP.VReg];
+ Record R(VMP.getLaneMask(), Dist);
+ if (NextUseMap.contains(VMP.getVReg())) {
+ SortedRecords &Dists = NextUseMap[VMP.getVReg()];
if (!Dists.contains(R)) {
for (auto D : Dists) {
@@ -151,16 +103,16 @@ class NextUseResult {
return false;
}
} else
- return NextUseMap[VMP.VReg].insert(R).second;
+ return NextUseMap[VMP.getVReg()].insert(R).second;
}
- bool clear(VRegMaskPair VMP) {
- if (NextUseMap.contains(VMP.VReg)) {
- auto &Dists = NextUseMap[VMP.VReg];
- remove_if(Dists,
- [&](Record R) { return (R.first &= ~VMP.LaneMask).none(); });
+ void clear(VRegMaskPair VMP) {
+ if (NextUseMap.contains(VMP.getVReg())) {
+ auto &Dists = NextUseMap[VMP.getVReg()];
+ std::erase_if(Dists,
+ [&](Record R) { return (R.first &= ~VMP.getLaneMask()).none(); });
if (Dists.empty())
- NextUseMap.erase(VMP.VReg);
+ NextUseMap.erase(VMP.getVReg());
}
}
@@ -303,9 +255,13 @@ class NextUseResult {
getSortedSubregUses(const MachineBasicBlock::iterator I,
const VRegMaskPair VMP);
- bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
- const VRegMaskPair VMP) {
- if (!VMP.VReg.isVirtual())
+ SmallVector<VRegMaskPair>
+ getSortedSubregUses(const MachineBasicBlock &MBB,
+ const VRegMaskPair VMP);
+
+ bool isDead(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const VRegMaskPair VMP) {
+ if (!VMP.getVReg().isVirtual())
report_fatal_error("Only virtual registers allowed!\n", true);
// FIXME: We use the same Infinity value to indicate both invalid distance
// and too long for out of block values. It is okay if the use out of block
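
The reworked clear() above drops, for a given VRegMaskPair, every record whose lanes are fully covered by the cleared mask; since the predicate takes its Record by value, partially covered records are left untouched. A minimal plain-C++ model of that pattern, using C++20's std::erase_if as the patch does; Record and clearLanes are illustrative names, not the in-tree types.

  #include <cstdint>
  #include <vector>

  using LaneMask = uint64_t;
  struct Record { LaneMask Mask; unsigned Dist; };

  // Remove every record whose lanes are entirely covered by ClearedMask; if
  // nothing survives, the caller erases the whole map entry for the vreg.
  void clearLanes(std::vector<Record> &Records, LaneMask ClearedMask) {
    std::erase_if(Records, [&](const Record &R) {
      return (R.Mask & ~ClearedMask) == 0;
    });
  }
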
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 98cca12c3c956..e8577c134b82d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -69,80 +69,100 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
- // We to support the RENAIMED set to avoid replacing the registers
- // which were not renamed in uses!
+ // 1. if UseMask > DefMask => search names stack to construct REG_SEQUENCE
+ // 2. if UseMask < DefMask => search names stack for the corresponding
+ // sub-register def. Replace reg in use only if VReg found != current VReg
+ // in use!
+ // 3. UseMask == DefMask => just replace the reg if the reg found !=
+ // current reg in use
for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual() &&
Renamed.contains(Op.getReg())) {
+ bool RewriteOp = true;
unsigned VReg = Op.getReg();
assert(!VregNames[VReg].empty() &&
"Error: use does not dominated by definition!\n");
- CurVRegInfo VRInfo = VregNames[VReg].back();
- unsigned CurVReg = VRInfo.CurName;
- // Does it meet the TODO above ?
- if (CurVReg == VReg)
- continue;
- unsigned DefSubregIdx = VRInfo.PrevSubRegIdx;
- LaneBitmask DefMask = VRInfo.PrevMask;
- MachineInstr *DefMI = VregNames[VReg].back().DefMI;
- MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg,
- TRI);
-
- // LaneBitmask DefMask = getOperandLaneMask(*DefOp);
- dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
- LaneBitmask UndefSubRegs = UseMask & ~DefMask;
- dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
-
+ LaneBitmask UndefSubRegs = UseMask;
unsigned SubRegIdx = AMDGPU::NoRegister;
-
- if (UndefSubRegs.any()) {
- // The closest Def defines not all the subregs used here!
- SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
-
- RegSeqOps.push_back({CurVReg, DefOp->getSubReg(), DefSubregIdx});
-
- VRegDefStack VregDefs = VregNames[VReg];
-
- VRegDefStack::reverse_iterator It = ++VregDefs.rbegin();
- for (; It != VregDefs.rend(); ++It) {
- // auto CurDef = It->CurDefMI;
- auto R = It->CurName;
- // auto CurDefOp = CurDef->findRegisterDefOperand(R, TRI);
- LaneBitmask DefMask = It->PrevMask;
- dbgs() << "Lanes defined for VReg before renaming : "
- << PrintLaneMask(DefMask) << "\n";
- LaneBitmask CurDefinedBits = DefMask & UndefSubRegs;
- dbgs() << "Defined bits are : " << PrintLaneMask(CurDefinedBits)
- << "\n";
-
- if (unsigned SubRegIdx = getSubRegIndexForLaneMask(CurDefinedBits, TRI))
- RegSeqOps.push_back({R, SubRegIdx, SubRegIdx});
- // clear subregs for which definition is found
- UndefSubRegs &= ~CurDefinedBits;
+          dbgs() << "Looking for appropriate definition...\n";
+ Register CurVReg = AMDGPU::NoRegister;
+ VRegDefStack VregDefs = VregNames[VReg];
+ VRegDefStack::reverse_iterator It = VregDefs.rbegin();
+ for (; It != VregDefs.rend(); ++It) {
+ CurVRegInfo VRInfo = *It;
+ dbgs() << "Def:\n";
+ CurVReg = VRInfo.CurName;
+ MachineInstr *DefMI = VRInfo.DefMI;
+ MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
+ dbgs() << "DefMI: " << *DefMI << "\n";
+ dbgs() << "Operand: " << *DefOp << "\n";
+ LaneBitmask DefMask = VRInfo.PrevMask;
+ dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+
+ LaneBitmask DefinedLanes = (UndefSubRegs & DefMask) & UseMask;
+ dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes)
+ << "\n";
+
+ if (DefinedLanes == UseMask) {
+ // All lanes used here are defined by this def.
+ if (CurVReg == VReg && Op.getSubReg() == DefOp->getSubReg()) {
+ // Need nothing - bail out.
+ RewriteOp = false;
+ break;
+ }
+ SubRegIdx = DefOp->getSubReg();
+ if ((DefMask | UseMask) != UseMask) {
+              // Definition defines more lanes than are used. Need a
+              // sub-register index.
+ SubRegIdx = getSubRegIndexForLaneMask(UseMask, TRI);
+ }
+ break;
+ }
+
+ if (DefinedLanes.any()) {
+ // Current definition defines some of the lanes used here.
+ RegSeqOps.push_back({CurVReg, DefOp->getSubReg(), It->PrevSubRegIdx});
+ UndefSubRegs = UseMask & ~DefMask;
dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
if (UndefSubRegs.none())
break;
+ } else {
+ // The current definition does not define any of the lanes used
+ // here. Continue to search for the definition.
+ dbgs() << "No lanes defined by this def!\n";
+ continue;
}
+ }
+
+ if (!RegSeqOps.empty()) {
// All subreg defs are found. Insert REG_SEQUENCE.
- auto *RC = TRI->getRegClassForOperandReg(*MRI, Op);
+ auto *RC = TRI->getRegClassForReg(*MRI, VReg);
CurVReg = MRI->createVirtualRegister(RC);
- auto RS = BuildMI(MBB, I, I.getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
- CurVReg);
+ auto RS = BuildMI(MBB, I, I.getDebugLoc(),
+ TII->get(AMDGPU::REG_SEQUENCE), CurVReg);
for (auto O : RegSeqOps) {
auto [R, SrcSubreg, DstSubreg] = O;
RS.addReg(R, 0, SrcSubreg);
RS.addImm(DstSubreg);
}
- } else {
- if ((DefMask | UseMask) != UseMask) {
- SubRegIdx = getSubRegIndexForLaneMask(UseMask & DefMask, TRI);
- }
+ VregNames[VReg].push_back(
+ {CurVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, RS});
+ }
+
+ assert(CurVReg != AMDGPU::NoRegister &&
+ "Use is not dominated by definition!\n");
+
+ if (RewriteOp) {
+ Op.setReg(CurVReg);
+ Op.setSubReg(SubRegIdx);
+ }
+
+ dbgs() << "Rewriting use: " << Op << " to " << CurVReg
+ << " with subreg: " << SubRegIdx << "\n";
}
- Op.setReg(CurVReg);
- Op.setSubReg(SubRegIdx);
- }
}
for (auto &Op : I.defs()) {
@@ -196,6 +216,9 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
renameVRegs(*ChildMBB);
}
+    // FIXME: Instead of popping the names, VregNames needs to be passed into
+    // the recursion by value. That keeps the names stack valid on exit from
+    // the recursion.
for (auto &I : MBB) {
for (auto Op : I.defs()) {
if (Op.getReg().isVirtual()) {
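
The use-rewriting loop above walks the names stack from the most recent definition and collects pieces until the use's lane mask is covered; if more than one piece is needed, a REG_SEQUENCE is emitted from them. Below is a standalone model of that walk, with plain integers standing in for LaneBitmask and Def/coverUse as illustrative names rather than the in-tree VregNames machinery.

  #include <cstdint>
  #include <utility>
  #include <vector>

  using LaneMask = uint64_t;
  struct Def { unsigned Name; LaneMask Mask; };   // one names-stack entry

  // Walk the stack from the newest def, collecting the (name, lanes) pieces
  // that cover UseMask; multiple pieces mean the caller builds a REG_SEQUENCE.
  std::vector<std::pair<unsigned, LaneMask>>
  coverUse(const std::vector<Def> &Stack, LaneMask UseMask) {
    std::vector<std::pair<unsigned, LaneMask>> Pieces;
    LaneMask Missing = UseMask;
    for (auto It = Stack.rbegin(); It != Stack.rend() && Missing; ++It) {
      LaneMask Covered = It->Mask & Missing;
      if (Covered) {
        Pieces.push_back({It->Name, Covered});
        Missing &= ~Covered;    // lanes still missing after this def
      }
    }
    return Pieces;              // any leftover lanes have no reaching def
  }
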
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
index dcb187da928b2..e5d2ab820e9d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
@@ -1,4 +1,4 @@
-//===- AMDGPUNextUseAnalysis.h ----------------------------------------*- C++-
+//===------- AMDGPUSSARAUtils.h ----------------------------------------*- C++-
//*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 7fcf25eac0c99..a1d3c78a38e0c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -55,15 +55,15 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
// return existing stack slot if any or assigns the new one
unsigned assignVirt2StackSlot(VRegMaskPair VMP) {
- assert(VMP.VReg.isVirtual());
+ assert(VMP.getVReg().isVirtual());
if (Virt2StackSlotMap.contains(VMP))
return Virt2StackSlotMap[VMP];
- const TargetRegisterClass *RC = MRI->getRegClass(VMP.VReg);
+ const TargetRegisterClass *RC = MRI->getRegClass(VMP.getVReg());
return Virt2StackSlotMap[VMP] = createSpillSlot(RC);
}
unsigned getStackSlot(VRegMaskPair VMP) {
- assert(VMP.VReg.isVirtual());
+ assert(VMP.getVReg().isVirtual());
return Virt2StackSlotMap[VMP];
}
@@ -151,11 +151,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned limit(MachineBasicBlock &MBB, RegisterSet &Active, RegisterSet &Spilled,
MachineBasicBlock::iterator I, unsigned Limit);
- unsigned getSizeInRegs(const VRegMaskPair VMP);
- unsigned getSizeInRegs(const RegisterSet VRegs);
-
- const TargetRegisterClass *getRegClassForVregMaskPair(VRegMaskPair VMP,
- unsigned &SubRegIdx);
+ unsigned getRegSetSizeInRegs(const RegisterSet VRegs);
bool takeReg(Register R) {
return ((IsVGPRsPass && TRI->isVGPR(*MRI, R)) ||
@@ -177,11 +173,13 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
SmallVector<VRegMaskPair> Tmp(VRegs.takeVector());
sort(Tmp, SortByDist);
VRegs.insert(Tmp.begin(), Tmp.end());
- LLVM_DEBUG(dbgs() << "\nActive set sorted at " << *I;
- for (auto P : VRegs) {
- printVRegMaskPair(P);
- dbgs() << " : " << M[P] << "\n";
- });
+ LLVM_DEBUG(dbgs() << "\nActive set sorted at ";
+ if (BlockEnd) dbgs() << "end of MBB_" << MBB.getNumber() << "."
+ << MBB.getName() << "\n";
+ else dbgs() << *I; for (auto P : VRegs) {
+ printVRegMaskPair(P);
+ dbgs() << " : " << M[P] << "\n";
+ });
}
// Fills Active until reaches the NumAvailableRegs. If @Capacity is passed
@@ -225,14 +223,14 @@ AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
LLVM_ATTRIBUTE_NOINLINE void
AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.VReg);
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.getVReg());
LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
dbgs() << "Vreg: [";
- if (P.LaneMask == FullMask) {
- dbgs() << printReg(P.VReg) << "] ";
+ if (P.getLaneMask() == FullMask) {
+ dbgs() << printReg(P.getVReg()) << "] ";
} else {
- unsigned SubRegIndex = getSubRegIndexForLaneMask(P.LaneMask, TRI);
- dbgs() << printReg(P.VReg, TRI, SubRegIndex, MRI) << "] ";
+ unsigned SubRegIndex = getSubRegIndexForLaneMask(P.getLaneMask(), TRI);
+ dbgs() << printReg(P.getVReg(), TRI, SubRegIndex, MRI) << "] ";
}
}
@@ -366,7 +364,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
limit(MBB, Active, Spilled, I, NumAvailableRegs);
unsigned NSpills = limit(MBB, Active, Spilled, std::next(I),
- NumAvailableRegs - getSizeInRegs(Defs));
+ NumAvailableRegs - getRegSetSizeInRegs(Defs));
// T4->startTimer();
@@ -377,7 +375,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
LLVM_DEBUG(dbgs() << "\nReloading "; printVRegMaskPair(R);
dbgs() << "\n");
Register NewVReg = reloadBefore(MBB, I, R);
- rewriteUses(MBB, R.VReg, NewVReg);
+ rewriteUses(MBB, R.getVReg(), NewVReg);
}
std::advance(I, NSpills);
@@ -552,7 +550,7 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
for (auto R : ReloadInPred) {
Register NewVReg = reloadAtEnd(*Pred, R);
- rewriteUses(*Pred, R.VReg, NewVReg);
+ rewriteUses(*Pred, R.getVReg(), NewVReg);
}
}
@@ -652,7 +650,7 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
MachineLoop *L = LI.getLoopFor(&MBB);
for (auto B : L->blocks()) {
RegisterSet Tmp = NU.usedInBlock(*B);
- Tmp.remove_if([&](VRegMaskPair P) { return !takeReg(P.VReg); });
+ Tmp.remove_if([&](VRegMaskPair P) { return !takeReg(P.getVReg()); });
LLVM_DEBUG(dbgs() << "\nBlock " << B->getName()
<< " is part of the loop. Used in block: ";
dumpRegSet(Tmp));
@@ -689,30 +687,6 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
dumpRegSet(getBlockInfo(MBB).ActiveSet));
}
-const TargetRegisterClass *
-AMDGPUSSASpiller::getRegClassForVregMaskPair(VRegMaskPair VMP,
- unsigned &SubRegIdx) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.VReg);
- LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
- if (VMP.LaneMask != Mask) {
- unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.LaneMask, TRI);
- RC = TRI->getSubRegisterClass(RC, SubRegIdx);
- }
-
- // if (!VMP.LaneMask.all()) {
- // SmallVector<unsigned> Idxs;
- // if (TRI->getCoveringSubRegIndexes(*MRI, RC, VMP.LaneMask, Idxs)) {
- // SubRegIdx = Idxs[0];
- // // FIXME: Idxs.size() - 1 ?
- // for (unsigned i = 1; i < Idxs.size() - 1; i++)
- // SubRegIdx = TRI->composeSubRegIndices(SubRegIdx, Idxs[i]);
- // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
- // }
- // }
-
- return RC;
-}
-
Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
return reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
}
@@ -724,8 +698,7 @@ void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
Register AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- unsigned SubRegIdx = 0;
- const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
+ const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
int FI = getStackSlot(VMP);
Register NewVReg = MRI->createVirtualRegister(RC);
TII->loadRegFromStackSlot(MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
@@ -742,30 +715,18 @@ Register AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.LaneMask, TRI);
- const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
+ const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
int FI = assignVirt2StackSlot(VMP);
- TII->storeRegToStackSlot(MBB, InsertBefore, VMP.VReg, true, FI, RC, TRI,
- VMP.VReg, SubRegIdx);
+ TII->storeRegToStackSlot(MBB, InsertBefore, VMP.getVReg(), true, FI, RC, TRI,
+ VMP.getVReg(), SubRegIdx);
// FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
MachineInstr &Spill = *(--InsertBefore);
LIS.InsertMachineInstrInMaps(Spill);
- if (LIS.hasInterval(VMP.VReg)) {
-
- LIS.removeInterval(VMP.VReg);
-
- // LiveInterval &LI = LIS.getInterval(VMP.VReg);
- // SlotIndex KillIdx = LIS.getInstructionIndex(Spill);
- // auto LR = LI.find(KillIdx);
- // if (LR != LI.end()) {
- // SlotIndex Start = LR->start;
- // SlotIndex End = LR->end;
- // if (Start < KillIdx) {
- // LI.removeSegment(KillIdx, End);
- // }
- // }
+ if (LIS.hasInterval(VMP.getVReg())) {
+ LIS.removeInterval(VMP.getVReg());
}
SpillPoints[VMP] = &Spill;
}
@@ -825,7 +786,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
LLVM_DEBUG(dbgs() << "\n\"limit\": Active set after DEAD VRegs removed:\n";
dumpRegSet(Active));
- unsigned CurRP = getSizeInRegs(Active);
+ unsigned CurRP = getRegSetSizeInRegs(Active);
if (CurRP <= Limit) {
// T2->stopTimer();
return NumSpills;
@@ -838,27 +799,29 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
while (CurRP > Limit) {
auto P = Active.pop_back_val();
- unsigned RegSize = getSizeInRegs(P);
+ unsigned RegSize = P.getSizeInRegs(MRI, TRI);
unsigned SizeToSpill = CurRP - Limit;
if (RegSize > SizeToSpill) {
- LaneBitmask ActiveMask = P.LaneMask;
+ LaneBitmask ActiveMask = P.getLaneMask();
- SmallVector<VRegMaskPair> Sorted = NU.getSortedSubregUses(I, P);
+ SmallVector<VRegMaskPair> Sorted = I == MBB.end()
+ ? NU.getSortedSubregUses(MBB, P)
+ : NU.getSortedSubregUses(I, P);
for (auto S : Sorted) {
- unsigned Size = getSizeInRegs(S);
+ unsigned Size = S.getSizeInRegs(MRI, TRI);
CurRP -= Size;
if (!Spilled.contains(S))
ToSpill.insert(S);
- ActiveMask &= (~S.LaneMask);
+ ActiveMask &= (~S.getLaneMask());
if (CurRP == Limit)
break;
}
if (ActiveMask.any()) {
// Insert the remaining part of the P to the Active set.
- VRegMaskPair Q(P.VReg, ActiveMask);
+ VRegMaskPair Q(P.getVReg(), ActiveMask);
// printVRegMaskPair(Q);
Active.insert(Q);
}
@@ -895,16 +858,16 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
return NumSpills;
}
-unsigned AMDGPUSSASpiller::getSizeInRegs(const VRegMaskPair VMP) {
- unsigned SubRegIdx = 0;
- const TargetRegisterClass *RC = getRegClassForVregMaskPair(VMP, SubRegIdx);
- return TRI->getRegClassWeight(RC).RegWeight;
-}
-unsigned AMDGPUSSASpiller::getSizeInRegs(const RegisterSet VRegs) {
+
+
+
+unsigned AMDGPUSSASpiller::getRegSetSizeInRegs(const RegisterSet VRegs) {
unsigned Size = 0;
- for (auto VMP : VRegs) {
- Size += getSizeInRegs(VMP);
+ for (auto &VMP : VRegs) {
+ printVRegMaskPair(VMP);
+ dbgs() << "\n";
+ Size += VMP.getSizeInRegs(MRI, TRI);
}
return Size;
}
@@ -913,10 +876,10 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity) {
unsigned Limit = Capacity ? Capacity : NumAvailableRegs;
auto &Active = RegisterMap[MBB.getNumber()].ActiveSet;
- unsigned Size = Capacity ? 0 : getSizeInRegs(Active);
+ unsigned Size = Capacity ? 0 : getRegSetSizeInRegs(Active);
sortRegSetAt(MBB, MBB.getFirstNonPHI(), S);
for (auto VMP : S) {
- unsigned RSize = getSizeInRegs(VMP);
+ unsigned RSize = VMP.getSizeInRegs(MRI, TRI);
if (Size + RSize <= Limit) {
Active.insert(VMP);
Size += RSize;
@@ -930,8 +893,8 @@ bool AMDGPUSSASpiller::isCoveredActive(VRegMaskPair VMP,
// printVRegMaskPair(VMP);
// dumpRegSet(Active);
for (auto P : Active) {
- if (P.VReg == VMP.VReg) {
- return (P.LaneMask & VMP.LaneMask) == VMP.LaneMask;
+ if (P.getVReg() == VMP.getVReg()) {
+ return (P.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask();
}
}
return false;
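
The limit() routine changed above is the Belady-style eviction step: the active set is sorted by next-use distance and the entries with the farthest uses are pushed out until the pressure fits, with wide registers optionally split lane-wise via getSortedSubregUses so only their farthest-used parts are spilled. A simplified standalone sketch of the eviction loop follows (no lane splitting; Entry and limitPressure are illustrative names).

  #include <algorithm>
  #include <vector>

  struct Entry { unsigned VReg; unsigned SizeInRegs; unsigned NextUseDist; };

  // Evict the entries whose next use is farthest away until the total size
  // fits the limit; the evicted entries are the spill candidates.
  std::vector<Entry> limitPressure(std::vector<Entry> &Active, unsigned Limit) {
    std::sort(Active.begin(), Active.end(),
              [](const Entry &A, const Entry &B) {
                return A.NextUseDist < B.NextUseDist;   // nearest use first
              });
    unsigned Pressure = 0;
    for (const Entry &E : Active)
      Pressure += E.SizeInRegs;

    std::vector<Entry> Evicted;
    while (Pressure > Limit && !Active.empty()) {
      Evicted.push_back(Active.back());   // farthest next use
      Pressure -= Active.back().SizeInRegs;
      Active.pop_back();
    }
    return Evicted;
  }
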
diff --git a/llvm/lib/Target/AMDGPU/VRegMaskPair.h b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
new file mode 100644
index 0000000000000..a24d6e06bbcc9
--- /dev/null
+++ b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
@@ -0,0 +1,103 @@
+//===- VRegMaskPair.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_VREGMASKPAIR_H
+#define LLVM_LIB_TARGET_VREGMASKPAIR_H
+
+#include "llvm/CodeGen/Register.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+
+class VRegMaskPair {
+
+ Register VReg;
+ LaneBitmask LaneMask;
+
+ public:
+ VRegMaskPair(Register VReg, LaneBitmask LaneMask)
+ : VReg(VReg), LaneMask(LaneMask) {}
+
+ VRegMaskPair(const VRegMaskPair &Other) = default;
+ VRegMaskPair(VRegMaskPair &&Other) = default;
+ VRegMaskPair &operator=(const VRegMaskPair &Other) = default;
+ VRegMaskPair &operator=(VRegMaskPair &&Other) = default;
+
+ VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI,
+ const MachineRegisterInfo *MRI) {
+ assert(MO.isReg() && "Not a register operand!");
+ Register R = MO.getReg();
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, R);
+ assert(R.isVirtual() && "Not a virtual register!");
+ VReg = R;
+ LaneMask = getFullMaskForRC(*RC, TRI);
+ unsigned subRegIndex = MO.getSubReg();
+ if (subRegIndex) {
+ LaneMask = TRI->getSubRegIndexLaneMask(subRegIndex);
+ }
+ }
+
+ const Register getVReg() const { return VReg; }
+ const LaneBitmask getLaneMask() const { return LaneMask; }
+
+ const TargetRegisterClass *getRegClass(const MachineRegisterInfo *MRI,
+ const SIRegisterInfo *TRI) const {
+
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
+ if (LaneMask != Mask) {
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(LaneMask, TRI);
+ RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ }
+
+ return RC;
+ }
+
+ unsigned getSizeInRegs(const MachineRegisterInfo *MRI,
+ const SIRegisterInfo *TRI) const {
+ const TargetRegisterClass *RC = getRegClass(MRI, TRI);
+ return TRI->getRegClassWeight(RC).RegWeight;
+ }
+
+ bool operator==(const VRegMaskPair &other) const {
+ return VReg == other.VReg && LaneMask == other.LaneMask;
+ }
+ };
+
+ namespace llvm {
+ template <> struct DenseMapInfo<VRegMaskPair> {
+ static inline VRegMaskPair getEmptyKey() {
+ return {Register(DenseMapInfo<unsigned>::getEmptyKey()),
+ LaneBitmask(0xFFFFFFFFFFFFFFFFULL)};
+ }
+
+ static inline VRegMaskPair getTombstoneKey() {
+ return {Register(DenseMapInfo<unsigned>::getTombstoneKey()),
+ LaneBitmask(0xFFFFFFFFFFFFFFFEULL)};
+ }
+
+ static unsigned getHashValue(const VRegMaskPair &P) {
+ return DenseMapInfo<unsigned>::getHashValue(P.getVReg().id()) ^
+ DenseMapInfo<uint64_t>::getHashValue(
+ P.getLaneMask().getAsInteger());
+ }
+
+ static bool isEqual(const VRegMaskPair &LHS, const VRegMaskPair &RHS) {
+ return DenseMapInfo<unsigned>::isEqual(LHS.getVReg().id(),
+ RHS.getVReg().id()) &&
+ DenseMapInfo<uint64_t>::isEqual(
+ LHS.getLaneMask().getAsInteger(),
+ RHS.getLaneMask().getAsInteger());
+ }
+ };
+ } // namespace llvm
+#endif // LLVM_LIB_TARGET_VREGMASKPAIR_H
\ No newline at end of file
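
With the DenseMapInfo specialization above, a VRegMaskPair can key LLVM DenseMaps directly, which is how the spiller's Virt2StackSlotMap distinguishes spill slots per (vreg, lane mask). A minimal usage sketch; lookupOrAssignSlot and NextSlot are illustrative names, and the snippet assumes the header's transitive includes are available in the translation unit.

  #include "VRegMaskPair.h"
  #include "llvm/ADT/DenseMap.h"

  // Distinct lane masks of the same vreg map to distinct slots because both
  // fields participate in hashing and equality.
  static int lookupOrAssignSlot(llvm::DenseMap<VRegMaskPair, int> &Map,
                                const VRegMaskPair &VMP, int NextSlot) {
    auto It = Map.find(VMP);
    if (It != Map.end())
      return It->second;
    Map[VMP] = NextSlot;
    return NextSlot;
  }
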
>From de4f8c7d4ac43aba82c91347f8c0d364d59a59f9 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Thu, 19 Jun 2025 22:25:50 +0200
Subject: [PATCH 30/46] Rebuild SSA: VRegNames stack as argument to the
recursion
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 28 ++++++++-------------
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index e8577c134b82d..79e130fac5101 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -41,7 +41,6 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
DenseMap<unsigned, SmallSet<unsigned, 4>> PHINodes;
DenseMap<MachineInstr *, unsigned> PHIMap;
- DenseMap<unsigned, VRegDefStack> VregNames;
DenseSet<unsigned> DefSeen;
DenseSet<unsigned> Renamed;
@@ -57,7 +56,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
IDF.calculate(PHIBlocks);
}
- void renameVRegs(MachineBasicBlock &MBB) {
+ void renameVRegs(MachineBasicBlock &MBB,
+ DenseMap<unsigned, VRegDefStack> VregNames) {
for (auto &PHI : MBB.phis()) {
Register Res = PHI.getOperand(0).getReg();
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, Res);
@@ -69,12 +69,17 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
+ // Sub-reg handling:
// 1. if UseMask > DefMask => search names stack to construct REG_SEQUENCE
// 2. if UseMask < DefMask => search names stack for the corresponding
// sub-register def. Replace reg in use only if VReg found != current VReg
// in use!
// 3. UseMask == DefMask => just replace the reg if the reg found !=
// current reg in use
+ // DefinedLanes serves as a result of the expression mentioned above.
+ // UndefSubRegs initially is set to UseMask but is updated on each
+ // iteration if we are looking for the sub-regs definitions to compose
+ // REG_SEQUENCE.
for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual() &&
Renamed.contains(Op.getReg())) {
@@ -213,20 +218,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto *Child : Node->children()) {
MachineBasicBlock *ChildMBB = Child->getBlock();
// Process child in the dominator tree
- renameVRegs(*ChildMBB);
- }
-
- // FIXME: Instead of poping the names VregNames need to be passed to the
- // recursion by value. This makes the names stack valid on exit from the
- // recursion!
- for (auto &I : MBB) {
- for (auto Op : I.defs()) {
- if (Op.getReg().isVirtual()) {
- Register VReg = Op.getReg();
- if (!VregNames[VReg].empty())
- VregNames[VReg].pop_back();
- }
- }
+ renameVRegs(*ChildMBB, VregNames);
}
}
@@ -279,7 +271,6 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
DefBlocks.clear();
LiveInBlocks.clear();
PHINodes.clear();
- VregNames.clear();
DefSeen.clear();
Renamed.clear();
@@ -333,7 +324,8 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
}
// Rename virtual registers in the basic block.
- renameVRegs(MF.front());
+ DenseMap<unsigned, VRegDefStack> VregNames;
+ renameVRegs(MF.front(), VregNames);
MF.getProperties().set(MachineFunctionProperties::Property::IsSSA);
MF.getProperties().reset(MachineFunctionProperties::Property ::NoPHIs);
return MRI->isSSA();
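
The point of passing VregNames by value in this patch is that each recursive call over the dominator tree gets its own copy of the name stacks, so definitions pushed while renaming a subtree disappear automatically when the call returns and the explicit pop loop becomes unnecessary. A small standalone model of that idea; Block, NameStacks and rename are illustrative stand-ins, not the pass's types.

  #include <map>
  #include <vector>

  struct Block { std::vector<Block *> Children; /* defs, instructions, ... */ };
  using NameStacks = std::map<unsigned, std::vector<unsigned>>;

  // Taking Names by value gives this subtree a private copy: names pushed
  // here are visible to the children but vanish when the call returns.
  void rename(Block &B, NameStacks Names /* by value on purpose */) {
    // ... push the new names for the defs in B onto Names ...
    for (Block *Child : B.Children)
      rename(*Child, Names);
  }
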
>From 1be008e71fb0590b7e20c20716760e4ad6f59239 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Fri, 27 Jun 2025 17:38:04 +0200
Subject: [PATCH 31/46] Rebuild SSA bugfixing WIP 27.06.25
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 58 +++++++++++++--------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 6 ++-
2 files changed, 40 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 79e130fac5101..cfeec24fcb9cb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -66,20 +66,26 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
VregNames[Res].push_back(
{NewVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, &PHI});
DefSeen.insert(NewVReg);
+ Renamed.insert(Res);
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
// Sub-reg handling:
- // 1. if UseMask > DefMask => search names stack to construct REG_SEQUENCE
- // 2. if UseMask < DefMask => search names stack for the corresponding
- // sub-register def. Replace reg in use only if VReg found != current VReg
- // in use!
- // 3. UseMask == DefMask => just replace the reg if the reg found !=
- // current reg in use
- // DefinedLanes serves as a result of the expression mentioned above.
- // UndefSubRegs initially is set to UseMask but is updated on each
- // iteration if we are looking for the sub-regs definitions to compose
- // REG_SEQUENCE.
+      // 1. if (UseMask & ~DefMask) != 0 : the current Def does not define all
+      // of the used lanes. Search the names stack for the Defs that define
+      // the missing lanes and construct a REG_SEQUENCE.
+      // 2. if (UseMask & DefMask) == 0 : the current Def only defines
+      // sub-registers that the current Use does not read. Search the names
+      // stack for the corresponding sub-register def and replace reg.subreg
+      // in the Use only if the VReg.subreg found != the current VReg.subreg.
+      // 3. if (UseMask & DefMask) == UseMask : just replace the reg if the
+      // reg found != the current reg in the Use, taking care of the subreg.
+      // If (DefMask | UseMask) != UseMask, i.e. the current Def defines more
+      // lanes than the Use reads, compute the corresponding subreg index for
+      // the Use. DefinedLanes accumulates the result of the expression above.
+      // UndefSubRegs starts as UseMask and is updated on each iteration while
+      // we are looking for the sub-reg definitions that compose the
+      // REG_SEQUENCE.
for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual() &&
Renamed.contains(Op.getReg())) {
@@ -91,6 +97,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
LaneBitmask UndefSubRegs = UseMask;
+ LaneBitmask DefinedLanes = LaneBitmask::getNone();
unsigned SubRegIdx = AMDGPU::NoRegister;
dbgs() << "Looking for appropriate definiton...\n";
Register CurVReg = AMDGPU::NoRegister;
@@ -106,12 +113,12 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
dbgs() << "Operand: " << *DefOp << "\n";
LaneBitmask DefMask = VRInfo.PrevMask;
dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ LaneBitmask LanesDefinedyCurrentDef =
+ (UndefSubRegs & DefMask) & UseMask;
+ DefinedLanes |= LanesDefinedyCurrentDef;
+ dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
- LaneBitmask DefinedLanes = (UndefSubRegs & DefMask) & UseMask;
- dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes)
- << "\n";
-
- if (DefinedLanes == UseMask) {
+ if (LanesDefinedyCurrentDef == UseMask) {
// All lanes used here are defined by this def.
if (CurVReg == VReg && Op.getSubReg() == DefOp->getSubReg()) {
// Need nothing - bail out.
@@ -119,8 +126,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
break;
}
SubRegIdx = DefOp->getSubReg();
- if ((DefMask | UseMask) != UseMask) {
- // Definition defines more lanes then used. Need su register
+ if ((DefMask & ~UseMask).any()) {
+            // Definition defines more lanes than are used. Need a
+            // sub-register index.
SubRegIdx = getSubRegIndexForLaneMask(UseMask, TRI);
}
@@ -129,8 +136,13 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
if (DefinedLanes.any()) {
// Current definition defines some of the lanes used here.
- RegSeqOps.push_back({CurVReg, DefOp->getSubReg(), It->PrevSubRegIdx});
- UndefSubRegs = UseMask & ~DefMask;
+ unsigned DstSubReg =
+ getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
+ unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
+ ? SubRegIdx
+ : AMDGPU::NoRegister;
+ RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
+ UndefSubRegs = UseMask & ~DefinedLanes;
dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
if (UndefSubRegs.none())
break;
@@ -160,14 +172,14 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
assert(CurVReg != AMDGPU::NoRegister &&
"Use is not dominated by definition!\n");
+ dbgs() << "Rewriting use: " << Op << " to "
+ << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
+
if (RewriteOp) {
Op.setReg(CurVReg);
Op.setSubReg(SubRegIdx);
}
-
- dbgs() << "Rewriting use: " << Op << " to " << CurVReg
- << " with subreg: " << SubRegIdx << "\n";
- }
+ }
}
for (auto &Op : I.defs()) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index a1d3c78a38e0c..49bd3a9895143 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -715,8 +715,12 @@ Register AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- unsigned SubRegIdx = getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
+
const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
+ LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
+ unsigned SubRegIdx = VMP.getLaneMask() == FullMask
+ ? AMDGPU::NoRegister
+ : getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
int FI = assignVirt2StackSlot(VMP);
TII->storeRegToStackSlot(MBB, InsertBefore, VMP.getVReg(), true, FI, RC, TRI,
>From 43aa064ebf7fd73623fc9e2c765e8af1eb302bf3 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Fri, 27 Jun 2025 19:23:41 +0200
Subject: [PATCH 32/46] Rebuild SSA bugfixing WIP 27.06.25 Part 2
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 234 ++++++++++----------
1 file changed, 122 insertions(+), 112 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index cfeec24fcb9cb..c4545c77f6d95 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -56,6 +56,117 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
IDF.calculate(PHIBlocks);
}
+ MachineOperand &rewriteUse(MachineOperand &Op, MachineInstr &I,
+ MachineBasicBlock &MBB,
+ DenseMap<unsigned, VRegDefStack> VregNames) {
+ // Sub-reg handling:
+    // 1. if (UseMask & ~DefMask) != 0 : the current Def does not define all
+    // of the used lanes. Search the names stack for the Defs that define
+    // the missing lanes and construct a REG_SEQUENCE.
+    // 2. if (UseMask & DefMask) == 0 : the current Def only defines
+    // sub-registers that the current Use does not read. Search the names
+    // stack for the corresponding sub-register def and replace reg.subreg
+    // in the Use only if the VReg.subreg found != the current VReg.subreg.
+    // 3. if (UseMask & DefMask) == UseMask : just replace the reg if the
+    // reg found != the current reg in the Use, taking care of the subreg.
+    // If (DefMask | UseMask) != UseMask, i.e. the current Def defines more
+    // lanes than the Use reads, compute the corresponding subreg index for
+    // the Use. DefinedLanes accumulates the result of the expression above.
+    // UndefSubRegs starts as UseMask and is updated on each iteration while
+    // we are looking for the sub-reg definitions that compose the
+    // REG_SEQUENCE.
+ bool RewriteOp = true;
+ unsigned VReg = Op.getReg();
+ assert(!VregNames[VReg].empty() &&
+           "Error: use is not dominated by definition!\n");
+ SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
+ LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
+ dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
+ LaneBitmask UndefSubRegs = UseMask;
+ LaneBitmask DefinedLanes = LaneBitmask::getNone();
+ unsigned SubRegIdx = AMDGPU::NoRegister;
+    dbgs() << "Looking for appropriate definition...\n";
+ Register CurVReg = AMDGPU::NoRegister;
+ VRegDefStack VregDefs = VregNames[VReg];
+ VRegDefStack::reverse_iterator It = VregDefs.rbegin();
+ for (; It != VregDefs.rend(); ++It) {
+ CurVRegInfo VRInfo = *It;
+ dbgs() << "Def:\n";
+ CurVReg = VRInfo.CurName;
+ MachineInstr *DefMI = VRInfo.DefMI;
+ MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
+ dbgs() << "DefMI: " << *DefMI << "\n";
+ dbgs() << "Operand: " << *DefOp << "\n";
+ LaneBitmask DefMask = VRInfo.PrevMask;
+ dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ LaneBitmask LanesDefinedyCurrentDef = (UndefSubRegs & DefMask) & UseMask;
+ DefinedLanes |= LanesDefinedyCurrentDef;
+ dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
+
+ if (LanesDefinedyCurrentDef == UseMask) {
+ // All lanes used here are defined by this def.
+ if (CurVReg == VReg && Op.getSubReg() == DefOp->getSubReg()) {
+ // Need nothing - bail out.
+ RewriteOp = false;
+ break;
+ }
+ SubRegIdx = DefOp->getSubReg();
+ if ((DefMask & ~UseMask).any()) {
+          // Definition defines more lanes than are used. Need a
+          // sub-register index.
+ SubRegIdx = getSubRegIndexForLaneMask(UseMask, TRI);
+ }
+ break;
+ }
+
+ if (LanesDefinedyCurrentDef.any()) {
+ // Current definition defines some of the lanes used here.
+ unsigned DstSubReg =
+ getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
+ unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
+ ? SubRegIdx
+ : AMDGPU::NoRegister;
+ RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
+ UndefSubRegs = UseMask & ~DefinedLanes;
+ dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
+ if (UndefSubRegs.none())
+ break;
+ } else {
+ // The current definition does not define any of the lanes used
+ // here. Continue to search for the definition.
+ dbgs() << "No lanes defined by this def!\n";
+ continue;
+ }
+ }
+
+ if (!RegSeqOps.empty()) {
+ // All subreg defs are found. Insert REG_SEQUENCE.
+ auto *RC = TRI->getRegClassForReg(*MRI, VReg);
+ CurVReg = MRI->createVirtualRegister(RC);
+ auto RS = BuildMI(MBB, I, I.getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
+ CurVReg);
+ for (auto O : RegSeqOps) {
+ auto [R, SrcSubreg, DstSubreg] = O;
+ RS.addReg(R, 0, SrcSubreg);
+ RS.addImm(DstSubreg);
+ }
+ VregNames[VReg].push_back(
+ {CurVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, RS});
+ }
+
+ assert(CurVReg != AMDGPU::NoRegister &&
+ "Use is not dominated by definition!\n");
+
+ dbgs() << "Rewriting use: " << Op << " to "
+ << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
+
+ if (RewriteOp) {
+ Op.setReg(CurVReg);
+ Op.setSubReg(SubRegIdx);
+ }
+ return Op;
+ }
+
void renameVRegs(MachineBasicBlock &MBB,
DenseMap<unsigned, VRegDefStack> VregNames) {
for (auto &PHI : MBB.phis()) {
@@ -70,115 +181,11 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
- // Sub-reg handling:
- // 1. if (UseMask & ~DefMask) != 0 : current Def does not define all used
- // lanes. We should search names stack for the Def that defines missed
- // lanes to construct the REG_SEQUENCE
- // 2. if (UseMask & DefMask) == 0 : current Def defines subregisters of a
- // register which are not used by the current Use. We should search names
- // stack for the corresponding sub-register def. Replace reg.subreg in Use
- // only if VReg.subreg found != current VReg.subreg in use!
- // 3. (UseMask & DefMask) == UseMask just replace the reg if the reg found
- // != current reg in Use. Take care of the subreg in Use. If (DefMask |
- // UseMask) != UseMask, i.e. current Def defines more lanes that is used
- // by the current Use, we need to calculate the corresponding subreg index
- // for the Use. DefinedLanes serves as a result of the expression
- // mentioned above. UndefSubRegs initially is set to UseMask but is
- // updated on each iteration if we are looking for the sub-regs
- // definitions to compose REG_SEQUENCE.
+
for (auto &Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual() &&
Renamed.contains(Op.getReg())) {
- bool RewriteOp = true;
- unsigned VReg = Op.getReg();
- assert(!VregNames[VReg].empty() &&
- "Error: use does not dominated by definition!\n");
- SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
- LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
- dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
- LaneBitmask UndefSubRegs = UseMask;
- LaneBitmask DefinedLanes = LaneBitmask::getNone();
- unsigned SubRegIdx = AMDGPU::NoRegister;
- dbgs() << "Looking for appropriate definiton...\n";
- Register CurVReg = AMDGPU::NoRegister;
- VRegDefStack VregDefs = VregNames[VReg];
- VRegDefStack::reverse_iterator It = VregDefs.rbegin();
- for (; It != VregDefs.rend(); ++It) {
- CurVRegInfo VRInfo = *It;
- dbgs() << "Def:\n";
- CurVReg = VRInfo.CurName;
- MachineInstr *DefMI = VRInfo.DefMI;
- MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
- dbgs() << "DefMI: " << *DefMI << "\n";
- dbgs() << "Operand: " << *DefOp << "\n";
- LaneBitmask DefMask = VRInfo.PrevMask;
- dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
- LaneBitmask LanesDefinedyCurrentDef =
- (UndefSubRegs & DefMask) & UseMask;
- DefinedLanes |= LanesDefinedyCurrentDef;
- dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
-
- if (LanesDefinedyCurrentDef == UseMask) {
- // All lanes used here are defined by this def.
- if (CurVReg == VReg && Op.getSubReg() == DefOp->getSubReg()) {
- // Need nothing - bail out.
- RewriteOp = false;
- break;
- }
- SubRegIdx = DefOp->getSubReg();
- if ((DefMask & ~UseMask).any()) {
- // Definition defines more lanes then used. Need sub register
- // index;
- SubRegIdx = getSubRegIndexForLaneMask(UseMask, TRI);
- }
- break;
- }
-
- if (DefinedLanes.any()) {
- // Current definition defines some of the lanes used here.
- unsigned DstSubReg =
- getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
- unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
- ? SubRegIdx
- : AMDGPU::NoRegister;
- RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
- UndefSubRegs = UseMask & ~DefinedLanes;
- dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
- if (UndefSubRegs.none())
- break;
- } else {
- // The current definition does not define any of the lanes used
- // here. Continue to search for the definition.
- dbgs() << "No lanes defined by this def!\n";
- continue;
- }
- }
-
- if (!RegSeqOps.empty()) {
- // All subreg defs are found. Insert REG_SEQUENCE.
- auto *RC = TRI->getRegClassForReg(*MRI, VReg);
- CurVReg = MRI->createVirtualRegister(RC);
- auto RS = BuildMI(MBB, I, I.getDebugLoc(),
- TII->get(AMDGPU::REG_SEQUENCE), CurVReg);
- for (auto O : RegSeqOps) {
- auto [R, SrcSubreg, DstSubreg] = O;
- RS.addReg(R, 0, SrcSubreg);
- RS.addImm(DstSubreg);
- }
- VregNames[VReg].push_back(
- {CurVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, RS});
- }
-
- assert(CurVReg != AMDGPU::NoRegister &&
- "Use is not dominated by definition!\n");
-
- dbgs() << "Rewriting use: " << Op << " to "
- << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
-
- if (RewriteOp) {
- Op.setReg(CurVReg);
- Op.setSubReg(SubRegIdx);
- }
+ Op = rewriteUse(Op, I, MBB, VregNames);
}
}
@@ -215,12 +222,15 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
PHI.addOperand(MachineOperand::CreateReg(VReg, false, false, false,
false, false));
} else {
- CurVRegInfo VRInfo = VregNames[VReg].back();
- MachineInstr *DefMI = VregNames[VReg].back().DefMI;
- MachineOperand *DefOp = DefMI->findRegisterDefOperand(VRInfo.CurName, TRI);
- PHI.addOperand(MachineOperand::CreateReg(VRInfo.CurName, false, false,
- false, false, false, false,
- DefOp->getSubReg()));
+ // CurVRegInfo VRInfo = VregNames[VReg].back();
+ // MachineInstr *DefMI = VregNames[VReg].back().DefMI;
+ // MachineOperand *DefOp = DefMI->findRegisterDefOperand(VRInfo.CurName, TRI);
+ // PHI.addOperand(MachineOperand::CreateReg(VRInfo.CurName, false, false,
+ // false, false, false, false,
+ // DefOp->getSubReg()));
+ MachineOperand Op = MachineOperand::CreateReg(VReg, false);
+ PHI.addOperand(Op);
+ Op = rewriteUse(Op, PHI, *Succ, VregNames);
}
PHI.addOperand(MachineOperand::CreateMBB(&MBB));
}
>From 6ed26b301a77bee68b81729f499721194c00f211 Mon Sep 17 00:00:00 2001
From: alex-t <atimofee at amd.com>
Date: Sat, 28 Jun 2025 16:21:32 +0200
Subject: [PATCH 33/46] Rebuild SSA bugfixing WIP 27.06.25 Part 3
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index c4545c77f6d95..7febf0453fa35 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -100,8 +100,10 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
LaneBitmask DefMask = VRInfo.PrevMask;
dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
LaneBitmask LanesDefinedyCurrentDef = (UndefSubRegs & DefMask) & UseMask;
+ dbgs() << "Lanes defined by current Def: "
+ << PrintLaneMask(LanesDefinedyCurrentDef) << "\n";
DefinedLanes |= LanesDefinedyCurrentDef;
- dbgs() << "Defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
+ dbgs() << "Total defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
if (LanesDefinedyCurrentDef == UseMask) {
// All lanes used here are defined by this def.
@@ -123,9 +125,15 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
// Current definition defines some of the lanes used here.
unsigned DstSubReg =
getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
+ if (!DstSubReg) {
+ // Should never be 0!
+ // Less over all def chain defined granularity
+ // LessDefinedGranularity = ~LanesDefinedyCurrentDef & UseMask (on each individual iteration!)
+ // Scan UndefSubRegs to cover with Mask = LessDefinedGranularity
+ }
unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
- ? SubRegIdx
- : AMDGPU::NoRegister;
+ ? DstSubReg
+ : AMDGPU::NoRegister;
RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
UndefSubRegs = UseMask & ~DefinedLanes;
dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
>From 45c3a5d4797e4cab04d49e3faa9c1ac8d6567f56 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Sat, 28 Jun 2025 13:02:55 -0500
Subject: [PATCH 34/46] Rebuild SSA bugfixing WIP 28.06.25
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 105 +++++++++++++++++---
1 file changed, 89 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 7febf0453fa35..2fab27942a920 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -56,9 +56,50 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
IDF.calculate(PHIBlocks);
}
- MachineOperand &rewriteUse(MachineOperand &Op, MachineInstr &I,
+ MachineOperand &rewriteUse(MachineOperand &Op, MachineBasicBlock::iterator I,
MachineBasicBlock &MBB,
DenseMap<unsigned, VRegDefStack> VregNames) {
+ const std::pair<unsigned, std::string> indexToNameTable[] = {
+ {0, "NoSubRegister"},
+ {1, "hi16"},
+ {2, "lo16"},
+ {3, "sub0"},
+ {4, "sub0_sub1"},
+ {5, "sub0_sub1_sub2"},
+ {6, "sub0_sub1_sub2_sub3"},
+ {7, "sub0_sub1_sub2_sub3_sub4"},
+ {8, "sub0_sub1_sub2_sub3_sub4_sub5"},
+ {9, "sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7"},
+ {10, "sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_"
+ "sub11_sub12_sub13_sub14_sub15"},
+ {11, "sub1"},
+ {12, "sub1_hi16"},
+ {13, "sub1_lo16"},
+ {14, "sub1_sub2"},
+ {15, "sub1_sub2_sub3"},
+ {16, "sub1_sub2_sub3_sub4"},
+ {17, "sub1_sub2_sub3_sub4_sub5"},
+ {18, "sub1_sub2_sub3_sub4_sub5_sub6"},
+ {19, "sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8"},
+ {20, "sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_"
+ "sub12_sub13_sub14_sub15_sub16"},
+ {21, "sub2"},
+ {22, "sub2_hi16"},
+ {23, "sub2_lo16"},
+ {24, "sub2_sub3"},
+ {25, "sub2_sub3_sub4"},
+ {26, "sub2_sub3_sub4_sub5"},
+ {27, "sub2_sub3_sub4_sub5_sub6"},
+ {28, "sub2_sub3_sub4_sub5_sub6_sub7"},
+ {29, "sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9"},
+ {30, "sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_"
+ "sub13_sub14_sub15_sub16_sub17"},
+ {31, "sub3"},
+ {32, "sub3_hi16"},
+ {33, "sub3_lo16"}};
+ std::map<unsigned, std::string> indexToName(
+ indexToNameTable, indexToNameTable + sizeof(indexToNameTable) /
+ sizeof(indexToNameTable[0]));
// Sub-reg handling:
// 1. if (UseMask & ~DefMask) != 0 : current Def does not define all used
// lanes. We should search names stack for the Def that defines missed
@@ -126,15 +167,52 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
unsigned DstSubReg =
getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
if (!DstSubReg) {
- // Should never be 0!
- // Less over all def chain defined granularity
- // LessDefinedGranularity = ~LanesDefinedyCurrentDef & UseMask (on each individual iteration!)
- // Scan UndefSubRegs to cover with Mask = LessDefinedGranularity
- }
- unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForOperandReg(*MRI, *DefOp);
+ SmallVector<unsigned, 8> MatchingSubIndices;
+
+ for (unsigned SubIdx = 1; SubIdx < TRI->getNumSubRegIndices();
+ ++SubIdx) {
+ if (!TRI->getSubClassWithSubReg(RC, SubIdx))
+ continue;
+
+ LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
+ if ((SubMask & LanesDefinedyCurrentDef).any()) {
+ MatchingSubIndices.push_back(SubIdx);
+ }
+ }
+ for (unsigned SubIdx : MatchingSubIndices) {
+ dbgs() << "Matching subreg: " << indexToName[SubIdx] << " : "
+ << PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
+ << "\n";
+ }
+
+ SmallVector<unsigned, 8> OptimalSubIndices;
+ llvm::stable_sort(MatchingSubIndices, [&](unsigned A, unsigned B) {
+ return TRI->getSubRegIndexLaneMask(A).getNumLanes() >
+ TRI->getSubRegIndexLaneMask(B).getNumLanes();
+ });
+ for (unsigned SubIdx : MatchingSubIndices) {
+ LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
+ if ((LanesDefinedyCurrentDef & SubMask) == SubMask) {
+ OptimalSubIndices.push_back(SubIdx);
+ LanesDefinedyCurrentDef &= ~SubMask; // remove covered bits
+ if (LanesDefinedyCurrentDef.none())
+ break;
+ }
+ }
+ for (unsigned SubIdx : OptimalSubIndices) {
+ dbgs() << "Matching subreg: " << indexToName[SubIdx] << " : "
+ << PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
+ << "\n";
+ RegSeqOps.push_back({CurVReg, SubIdx, SubIdx});
+ }
+ } else {
+ unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
? DstSubReg
: AMDGPU::NoRegister;
- RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
+ RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
+ }
UndefSubRegs = UseMask & ~DefinedLanes;
dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
if (UndefSubRegs.none())
@@ -151,7 +229,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
// All subreg defs are found. Insert REG_SEQUENCE.
auto *RC = TRI->getRegClassForReg(*MRI, VReg);
CurVReg = MRI->createVirtualRegister(RC);
- auto RS = BuildMI(MBB, I, I.getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
+ auto RS = BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
CurVReg);
for (auto O : RegSeqOps) {
auto [R, SrcSubreg, DstSubreg] = O;
@@ -230,15 +308,10 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
PHI.addOperand(MachineOperand::CreateReg(VReg, false, false, false,
false, false));
} else {
- // CurVRegInfo VRInfo = VregNames[VReg].back();
- // MachineInstr *DefMI = VregNames[VReg].back().DefMI;
- // MachineOperand *DefOp = DefMI->findRegisterDefOperand(VRInfo.CurName, TRI);
- // PHI.addOperand(MachineOperand::CreateReg(VRInfo.CurName, false, false,
- // false, false, false, false,
- // DefOp->getSubReg()));
MachineOperand Op = MachineOperand::CreateReg(VReg, false);
+ MachineBasicBlock::iterator IP = MBB.getFirstTerminator();
+ Op = rewriteUse(Op, IP, MBB, VregNames);
PHI.addOperand(Op);
- Op = rewriteUse(Op, PHI, *Succ, VregNames);
}
PHI.addOperand(MachineOperand::CreateMBB(&MBB));
}
>From 85952869f45fafb2509ce34c337bde52758e0cfa Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Sat, 5 Jul 2025 12:20:14 -0500
Subject: [PATCH 35/46] Rebuild SSA bugfixing WIP 04.07.25
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 102 +++----------
llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h | 31 ++++
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 156 ++++++++++++--------
3 files changed, 149 insertions(+), 140 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 2fab27942a920..189bad19597e2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -59,47 +59,6 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
MachineOperand &rewriteUse(MachineOperand &Op, MachineBasicBlock::iterator I,
MachineBasicBlock &MBB,
DenseMap<unsigned, VRegDefStack> VregNames) {
- const std::pair<unsigned, std::string> indexToNameTable[] = {
- {0, "NoSubRegister"},
- {1, "hi16"},
- {2, "lo16"},
- {3, "sub0"},
- {4, "sub0_sub1"},
- {5, "sub0_sub1_sub2"},
- {6, "sub0_sub1_sub2_sub3"},
- {7, "sub0_sub1_sub2_sub3_sub4"},
- {8, "sub0_sub1_sub2_sub3_sub4_sub5"},
- {9, "sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7"},
- {10, "sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_"
- "sub11_sub12_sub13_sub14_sub15"},
- {11, "sub1"},
- {12, "sub1_hi16"},
- {13, "sub1_lo16"},
- {14, "sub1_sub2"},
- {15, "sub1_sub2_sub3"},
- {16, "sub1_sub2_sub3_sub4"},
- {17, "sub1_sub2_sub3_sub4_sub5"},
- {18, "sub1_sub2_sub3_sub4_sub5_sub6"},
- {19, "sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8"},
- {20, "sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_"
- "sub12_sub13_sub14_sub15_sub16"},
- {21, "sub2"},
- {22, "sub2_hi16"},
- {23, "sub2_lo16"},
- {24, "sub2_sub3"},
- {25, "sub2_sub3_sub4"},
- {26, "sub2_sub3_sub4_sub5"},
- {27, "sub2_sub3_sub4_sub5_sub6"},
- {28, "sub2_sub3_sub4_sub5_sub6_sub7"},
- {29, "sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9"},
- {30, "sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_"
- "sub13_sub14_sub15_sub16_sub17"},
- {31, "sub3"},
- {32, "sub3_hi16"},
- {33, "sub3_lo16"}};
- std::map<unsigned, std::string> indexToName(
- indexToNameTable, indexToNameTable + sizeof(indexToNameTable) /
- sizeof(indexToNameTable[0]));
// Sub-reg handling:
// 1. if (UseMask & ~DefMask) != 0 : current Def does not define all used
// lanes. We should search names stack for the Def that defines missed
@@ -136,6 +95,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
CurVReg = VRInfo.CurName;
MachineInstr *DefMI = VRInfo.DefMI;
MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForOperandReg(*MRI, *DefOp);
dbgs() << "DefMI: " << *DefMI << "\n";
dbgs() << "Operand: " << *DefOp << "\n";
LaneBitmask DefMask = VRInfo.PrevMask;
@@ -167,42 +128,10 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
unsigned DstSubReg =
getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
if (!DstSubReg) {
- const TargetRegisterClass *RC =
- TRI->getRegClassForOperandReg(*MRI, *DefOp);
- SmallVector<unsigned, 8> MatchingSubIndices;
-
- for (unsigned SubIdx = 1; SubIdx < TRI->getNumSubRegIndices();
- ++SubIdx) {
- if (!TRI->getSubClassWithSubReg(RC, SubIdx))
- continue;
-
- LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
- if ((SubMask & LanesDefinedyCurrentDef).any()) {
- MatchingSubIndices.push_back(SubIdx);
- }
- }
- for (unsigned SubIdx : MatchingSubIndices) {
- dbgs() << "Matching subreg: " << indexToName[SubIdx] << " : "
- << PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
- << "\n";
- }
-
- SmallVector<unsigned, 8> OptimalSubIndices;
- llvm::stable_sort(MatchingSubIndices, [&](unsigned A, unsigned B) {
- return TRI->getSubRegIndexLaneMask(A).getNumLanes() >
- TRI->getSubRegIndexLaneMask(B).getNumLanes();
- });
- for (unsigned SubIdx : MatchingSubIndices) {
- LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
- if ((LanesDefinedyCurrentDef & SubMask) == SubMask) {
- OptimalSubIndices.push_back(SubIdx);
- LanesDefinedyCurrentDef &= ~SubMask; // remove covered bits
- if (LanesDefinedyCurrentDef.none())
- break;
- }
- }
- for (unsigned SubIdx : OptimalSubIndices) {
- dbgs() << "Matching subreg: " << indexToName[SubIdx] << " : "
+ SmallVector<unsigned> Idxs =
+ getCoveringSubRegsForLaneMask(LanesDefinedyCurrentDef, RC, TRI);
+ for (unsigned SubIdx : Idxs) {
+ dbgs() << "Matching subreg: " << SubIdx << " : "
<< PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
<< "\n";
RegSeqOps.push_back({CurVReg, SubIdx, SubIdx});
@@ -210,7 +139,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
} else {
unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
? DstSubReg
- : AMDGPU::NoRegister;
+ : DefOp->getSubReg();
RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
}
UndefSubRegs = UseMask & ~DefinedLanes;
@@ -225,6 +154,22 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
}
+ if (UndefSubRegs != UseMask && !UndefSubRegs.none()) {
+      // We haven't found definitions for all used sub-registers. Assume the
+      // missing lanes are undef and insert an IMPLICIT_DEF for them.
+
+ const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, Op);
+ SmallVector<unsigned> Idxs =
+ getCoveringSubRegsForLaneMask(UndefSubRegs, RC, TRI);
+ for (unsigned SubIdx : Idxs) {
+ const TargetRegisterClass *SubRC = TRI->getSubRegisterClass(RC, SubIdx);
+ Register NewVReg = MRI->createVirtualRegister(SubRC);
+ BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::IMPLICIT_DEF))
+ .addReg(NewVReg, RegState::Define);
+ RegSeqOps.push_back({NewVReg, AMDGPU::NoRegister, SubIdx});
+ }
+ }
+
if (!RegSeqOps.empty()) {
// All subreg defs are found. Insert REG_SEQUENCE.
auto *RC = TRI->getRegClassForReg(*MRI, VReg);
@@ -236,6 +181,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
RS.addReg(R, 0, SrcSubreg);
RS.addImm(DstSubreg);
}
+
VregNames[VReg].push_back(
{CurVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, RS});
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
index e5d2ab820e9d7..bf8bb728e5f08 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
@@ -51,4 +51,35 @@ inline unsigned getSubRegIndexForLaneMask(LaneBitmask Mask,
}
return AMDGPU::NoRegister;
}
+
+inline SmallVector<unsigned>
+getCoveringSubRegsForLaneMask(LaneBitmask Mask, const TargetRegisterClass *RC,
+ const SIRegisterInfo *TRI) {
+ SmallVector<unsigned> Candidates;
+ for (unsigned SubIdx = 1; SubIdx < TRI->getNumSubRegIndices(); ++SubIdx) {
+ if (!TRI->getSubClassWithSubReg(RC, SubIdx))
+ continue;
+
+ LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
+ if ((SubMask & Mask).any()) {
+ Candidates.push_back(SubIdx);
+ }
+ }
+
+ SmallVector<unsigned> OptimalSubIndices;
+ llvm::stable_sort(Candidates, [&](unsigned A, unsigned B) {
+ return TRI->getSubRegIndexLaneMask(A).getNumLanes() >
+ TRI->getSubRegIndexLaneMask(B).getNumLanes();
+ });
+ for (unsigned SubIdx : Candidates) {
+ LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubIdx);
+ if ((Mask & SubMask) == SubMask) {
+ OptimalSubIndices.push_back(SubIdx);
+ Mask &= ~SubMask; // remove covered bits
+ if (Mask.none())
+ break;
+ }
+ }
+ return OptimalSubIndices;
+}
#endif // LLVM_LIB_TARGET_AMDGPU_SSA_RA_UTILS_H
\ No newline at end of file
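
For review, here is a minimal standalone model of the greedy covering performed by the new getCoveringSubRegsForLaneMask helper: candidates overlapping the requested mask are sorted widest-first and accepted only when fully contained in the lanes that remain uncovered. The sub-register names and masks below are illustrative placeholders, not the generated AMDGPU tables.

```cpp
// Toy model of widest-first lane-mask covering; SubRegIdx entries are
// hypothetical, not TableGen-generated indices.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct SubRegIdx { std::string Name; uint64_t Mask; };

std::vector<SubRegIdx> coverLaneMask(uint64_t Mask,
                                     std::vector<SubRegIdx> Candidates) {
  // Drop candidates that do not overlap the requested lanes at all.
  Candidates.erase(std::remove_if(Candidates.begin(), Candidates.end(),
                                  [&](const SubRegIdx &S) {
                                    return (S.Mask & Mask) == 0;
                                  }),
                   Candidates.end());
  // Widest indices first, so the cover needs as few pieces as possible.
  std::stable_sort(Candidates.begin(), Candidates.end(),
                   [](const SubRegIdx &A, const SubRegIdx &B) {
                     return __builtin_popcountll(A.Mask) >
                            __builtin_popcountll(B.Mask);
                   });
  std::vector<SubRegIdx> Cover;
  for (const SubRegIdx &S : Candidates) {
    if ((Mask & S.Mask) == S.Mask) { // fully contained in the remaining lanes
      Cover.push_back(S);
      Mask &= ~S.Mask;               // remove the lanes just covered
      if (Mask == 0)
        break;
    }
  }
  return Cover;
}

int main() {
  std::vector<SubRegIdx> Table = {{"sub0", 0x1}, {"sub1", 0x2}, {"sub2", 0x4},
                                  {"sub3", 0x8}, {"sub0_sub1", 0x3}};
  for (const SubRegIdx &S : coverLaneMask(0x7, Table)) // lanes 0..2
    std::cout << S.Name << "\n";                       // sub0_sub1, sub2
}
```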
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 49bd3a9895143..325296ad065cd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -28,13 +28,13 @@ namespace {
class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
LiveIntervals &LIS;
MachineLoopInfo &LI;
+ MachineDominatorTree &MDT;
AMDGPUNextUseAnalysis::Result &NU;
MachineRegisterInfo *MRI;
const SIRegisterInfo *TRI;
const SIInstrInfo *TII;
const GCNSubtarget *ST;
MachineFrameInfo *MFI;
-
unsigned NumSpillSlots;
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
@@ -137,14 +137,15 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void initActiveSetUsualBlock(MachineBasicBlock &MBB);
void initActiveSetLoopHeader(MachineBasicBlock &MBB);
- Register reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
+ Register reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP,
+ MachineInstr *ReloadMI);
void spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
- Register reloadBefore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
+ Register reloadBefore(MachineBasicBlock::iterator InsertBefore,
+ VRegMaskPair VMP, MachineInstr *&ReloadMI);
void spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
- void rewriteUses(MachineBasicBlock &MBB, Register OldVReg, Register NewVReg);
+ void rewriteUses(MachineInstr &I, Register OldVReg, Register NewVReg);
unsigned getLoopMaxRP(MachineLoop *L);
// Returns number of spilled VRegs
@@ -191,8 +192,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
public:
AMDGPUSSASpiller(LiveIntervals &LIS, MachineLoopInfo &LI,
- AMDGPUNextUseAnalysis::Result &NU)
- : LIS(LIS), LI(LI), NU(NU), NumSpillSlots(0) {
+ MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
+ : LIS(LIS), LI(LI), MDT(MDT), NU(NU), NumSpillSlots(0) {
TG = new TimerGroup("SSA SPiller Timing",
"Time Spent in different parts of the SSA Spiller");
T1 = new Timer("General time", "ProcessFunction", *TG);
@@ -361,10 +362,12 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
LLVM_DEBUG(dbgs() << "\nActive set with uses reloaded:\n";
dumpRegSet(Active));
-
+ unsigned NSpills = 0;
limit(MBB, Active, Spilled, I, NumAvailableRegs);
- unsigned NSpills = limit(MBB, Active, Spilled, std::next(I),
- NumAvailableRegs - getRegSetSizeInRegs(Defs));
+ if (!I->isRegSequence()) {
+ NSpills = limit(MBB, Active, Spilled, std::next(I),
+ NumAvailableRegs - getRegSetSizeInRegs(Defs));
+ }
// T4->startTimer();
@@ -374,8 +377,10 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (auto R : Reloads) {
LLVM_DEBUG(dbgs() << "\nReloading "; printVRegMaskPair(R);
dbgs() << "\n");
- Register NewVReg = reloadBefore(MBB, I, R);
- rewriteUses(MBB, R.getVReg(), NewVReg);
+ MachineInstr *ReloadMI = nullptr;
+ Register NewVReg = reloadBefore(I, R, ReloadMI);
+ assert(ReloadMI && "NULL returned from reloadBefore\n");
+ rewriteUses(*ReloadMI, R.getVReg(), NewVReg);
}
std::advance(I, NSpills);
@@ -417,7 +422,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (ValueSrc->getNumber() == MBB.getNumber()) {
VRegMaskPair VMP(U, TRI, MRI);
if (!isCoveredActive(VMP, Active)) {
- Register NewVReg = reloadAtEnd(MBB, VMP);
+ MachineInstr *ReloadMI = nullptr;
+ Register NewVReg = reloadAtEnd(MBB, VMP, ReloadMI);
// U.setReg(NewVReg);
// U.setSubReg(AMDGPU::NoRegister);
@@ -521,6 +527,41 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// fail if the CF reached BB3 along the BB0 -> BB3 edge]
// set_intersect(Entry.SpillSet, Entry.ActiveSet);
+ DenseMap<MachineBasicBlock*, RegisterSet> ToSpill;
+ for (auto Pred : Preds) {
+ auto &PE = getBlockInfo(*Pred);
+ LLVM_DEBUG(dbgs() << "\nCurr block [ MBB_" << MBB.getNumber() << "."
+ << MBB.getName() << " ] Active Set:\n";
+ dumpRegSet(Entry.ActiveSet);
+ dbgs() << "\nPred [ MBB_" << Pred->getNumber() << "."
+ << Pred->getName() << " ] ActiveSet:\n";
+ dumpRegSet(PE.ActiveSet));
+ LLVM_DEBUG(dbgs() << "\nCur BB [ MBB_" << MBB.getNumber() << "."
+ << MBB.getName() << " ] SpillSet:\n";
+ dumpRegSet(Entry.SpillSet));
+ LLVM_DEBUG(dbgs() << "\nPred [ MBB_" << Pred->getNumber() << "."
+ << Pred->getName() << " ] SpillSet:\n";
+ dumpRegSet(PE.SpillSet));
+ for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
+ PE.ActiveSet)) {
+ printVRegMaskPair(S);
+ ToSpill[Pred].insert(S);
+ }
+ }
+
+ for (auto E : ToSpill) {
+ MachineBasicBlock *Pred = E.first;
+ auto &PE = getBlockInfo(*Pred);
+ for (auto S : E.second) {
+ spillAtEnd(*Pred, S);
+ PE.SpillSet.insert(S);
+ PE.ActiveSet.remove(S);
+ dumpRegSet(PE.ActiveSet);
+ Entry.SpillSet.insert(S);
+ Entry.ActiveSet.remove(S);
+ dumpRegSet(Entry.ActiveSet);
+ }
+ }
for (auto Pred : Preds) {
auto &PE = getBlockInfo(*Pred);
@@ -540,31 +581,20 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
dumpRegSet(ReloadInPred);
if (!ReloadInPred.empty()) {
- // Since we operate on SSA, any register that is live across the edge must
- // either be defined before or within the IDom, or be a PHI operand. If a
- // register is neither a PHI operand nor live-out from all predecessors,
- // it must have been spilled in one of them. Registers that are defined
- // and used entirely within a predecessor are dead at its exit. Therefore,
- // there is always room to reload a register that is not live across the
- // edge.
+ // Since we operate on SSA, any register that is live across the edge
+ // must either be defined before or within the IDom, or be a PHI
+ // operand. If a register is neither a PHI operand nor live-out from all
+ // predecessors, it must have been spilled in one of them. Registers
+ // that are defined and used entirely within a predecessor are dead at
+ // its exit. Therefore, there is always room to reload a register that
+ // is not live across the edge.
for (auto R : ReloadInPred) {
- Register NewVReg = reloadAtEnd(*Pred, R);
- rewriteUses(*Pred, R.getVReg(), NewVReg);
+ MachineInstr *ReloadMI = nullptr;
+ Register NewVReg = reloadAtEnd(*Pred, R, ReloadMI);
+ rewriteUses(*ReloadMI, R.getVReg(), NewVReg);
}
}
-
- LLVM_DEBUG(dbgs() << "\nPred [ MBB_" << Pred->getNumber() << "."
- << Pred->getName() << " ] SpillSet:\n";
- dumpRegSet(PE.SpillSet));
- for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
- PE.ActiveSet)) {
- spillAtEnd(*Pred, S);
- PE.SpillSet.insert(S);
- PE.ActiveSet.remove(S);
- Entry.SpillSet.insert(S);
- Entry.ActiveSet.remove(S);
- }
}
}
@@ -687,27 +717,29 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
dumpRegSet(getBlockInfo(MBB).ActiveSet));
}
-Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
- return reloadBefore(MBB, MBB.getFirstInstrTerminator(), VMP);
+Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP,
+ MachineInstr *ReloadMI) {
+ return reloadBefore(*MBB.getFirstInstrTerminator(), VMP, ReloadMI);
}
void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
spillBefore(MBB, MBB.getFirstTerminator(), VMP);
}
-Register AMDGPUSSASpiller::reloadBefore(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertBefore,
- VRegMaskPair VMP) {
+Register
+AMDGPUSSASpiller::reloadBefore(MachineBasicBlock::iterator InsertBefore,
+ VRegMaskPair VMP, MachineInstr *&ReloadMI) {
+ MachineBasicBlock *MBB = InsertBefore->getParent();
const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
int FI = getStackSlot(VMP);
Register NewVReg = MRI->createVirtualRegister(RC);
- TII->loadRegFromStackSlot(MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
+ TII->loadRegFromStackSlot(*MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
// FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
- MachineInstr &ReloadMI = *(--InsertBefore);
- LIS.InsertMachineInstrInMaps(ReloadMI);
+ ReloadMI = &*(--InsertBefore);
+ LIS.InsertMachineInstrInMaps(*ReloadMI);
LIS.createAndComputeVirtRegInterval(NewVReg);
- auto &Entry = getBlockInfo(MBB);
+ auto &Entry = getBlockInfo(*MBB);
Entry.ActiveSet.insert({NewVReg, getFullMaskForRC(*RC, TRI)});
return NewVReg;
}
@@ -735,25 +767,21 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
SpillPoints[VMP] = &Spill;
}
-void AMDGPUSSASpiller::rewriteUses(MachineBasicBlock &MBB, Register OldVReg,
+void AMDGPUSSASpiller::rewriteUses(MachineInstr &I, Register OldVReg,
Register NewVReg) {
- MachineSSAUpdater SSAUpdater(*MBB.getParent());
+ MachineSSAUpdater SSAUpdater(*I.getParent()->getParent());
SSAUpdater.Initialize(OldVReg);
- SSAUpdater.AddAvailableValue(&MBB, NewVReg);
- for (MachineOperand &UseOp : MRI->use_operands(OldVReg)) {
- MachineInstr *UseMI = UseOp.getParent();
- MachineBasicBlock *UseMBB = UseMI->getParent();
-
- if (UseMBB->getNumber() == MBB.getNumber()) {
- UseOp.setReg(NewVReg);
- UseOp.setSubReg(AMDGPU::NoRegister);
- } else {
- // We skip rewriting if SSAUpdater already has a dominating def for
- // this block
- if (SSAUpdater.HasValueForBlock(UseMBB))
- continue;
- // This rewrites the use to a PHI result or correct value
- SSAUpdater.RewriteUse(UseOp);
+ SSAUpdater.AddAvailableValue(I.getParent(), NewVReg);
+ for (auto &U : MRI->use_operands(OldVReg)) {
+ MachineInstr *UseMI = U.getParent();
+ if (MDT.dominates(&I, UseMI)) {
+ if (I.getParent() == UseMI->getParent()) {
+ // If the use is in the same block, just rewrite it.
+ U.setReg(NewVReg);
+ U.setSubReg(AMDGPU::NoRegister);
+ } else {
+ SSAUpdater.RewriteUse(U);
+ }
}
}
}
@@ -929,7 +957,8 @@ llvm::AMDGPUSSASpillerPass::run(MachineFunction &MF,
LiveIntervals &LIS = MFAM.getResult<LiveIntervalsAnalysis>(MF);
MachineLoopInfo &LI = MFAM.getResult<MachineLoopAnalysis>(MF);
AMDGPUNextUseAnalysis::Result &NU = MFAM.getResult<AMDGPUNextUseAnalysis>(MF);
- AMDGPUSSASpiller Impl(LIS, LI, NU);
+ MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
+ AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
bool Changed = Impl.run(MF);
if (!Changed)
return PreservedAnalyses::all();
@@ -957,6 +986,7 @@ class AMDGPUSSASpillerLegacy : public MachineFunctionPass {
AU.addPreservedID(MachineLoopInfoID);
AU.addRequired<LiveIntervalsWrapperPass>();
AU.addRequired<AMDGPUNextUseAnalysisWrapper>();
+ AU.addRequired<MachineDominatorTreeWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
@@ -966,7 +996,8 @@ bool AMDGPUSSASpillerLegacy::runOnMachineFunction(MachineFunction &MF) {
MachineLoopInfo &LI = getAnalysis<MachineLoopInfoWrapperPass>().getLI();
AMDGPUNextUseAnalysis::Result &NU =
getAnalysis<AMDGPUNextUseAnalysisWrapper>().getNU();
- AMDGPUSSASpiller Impl(LIS, LI, NU);
+ MachineDominatorTree &MDT = getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
+ AMDGPUSSASpiller Impl(LIS, LI, MDT, NU);
return Impl.run(MF);
}
@@ -975,6 +1006,7 @@ INITIALIZE_PASS_BEGIN(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AMDGPUNextUseAnalysisWrapper)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
INITIALIZE_PASS_END(AMDGPUSSASpillerLegacy, DEBUG_TYPE, "AMDGPU SSA Spiller",
false, false)
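
The reworked connectToPredecessors now first collects, per predecessor, the values the current block expects in spill slots while that predecessor still holds them in registers, and only afterwards materializes the spills. A self-contained sketch of that set computation under simplified types (plain std::set stand-ins, not the pass's RegisterSet):

```cpp
// Toy model of the spill-on-edge computation in connectToPredecessors:
// a value the current block treats as spilled must be spilled at the end
// of every predecessor that still keeps it live in a register.
#include <iostream>
#include <map>
#include <set>
#include <string>

using VReg = std::string;
struct BlockInfo { std::set<VReg> Active, Spilled; };

std::map<std::string, std::set<VReg>>
computeSpillsOnEdges(const BlockInfo &Cur,
                     const std::map<std::string, BlockInfo> &Preds) {
  std::map<std::string, std::set<VReg>> ToSpill;
  for (const auto &[Name, PE] : Preds)
    for (const VReg &R : Cur.Spilled)
      // Spilled here, not yet spilled in the predecessor, but still live
      // in one of its registers -> needs a spill at the predecessor's end.
      if (!PE.Spilled.count(R) && PE.Active.count(R))
        ToSpill[Name].insert(R);
  return ToSpill;
}

int main() {
  BlockInfo Cur{{"v1"}, {"v2", "v3"}};
  std::map<std::string, BlockInfo> Preds = {
      {"bb.1", {{"v2", "v3"}, {}}},  // holds both values in registers
      {"bb.2", {{"v3"}, {"v2"}}}};   // already spilled v2
  for (const auto &[Name, Regs] : computeSpillsOnEdges(Cur, Preds))
    for (const VReg &R : Regs)
      std::cout << "spill " << R << " at end of " << Name << "\n";
}
```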
>From 8284d708b982261b8a0d56e90f3a5ae31924bb5c Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 11 Jul 2025 09:46:38 -0500
Subject: [PATCH 36/46] Rebuild SSA: VRegMaskPair instead of Register
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 94 +++++++++++++--------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 25 +++---
llvm/lib/Target/AMDGPU/VRegMaskPair.h | 14 ++-
3 files changed, 89 insertions(+), 44 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 189bad19597e2..9970a702bf50d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -14,6 +14,8 @@
#include <stack>
+#include "VRegMaskPair.h"
+
using namespace llvm;
#define DEBUG_TYPE "amdgpu-rebuild-ssa"
@@ -36,11 +38,11 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
using VRegDefStack = std::vector<CurVRegInfo>;
- SetVector<unsigned> CrossBlockVRegs;
- DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
- DenseMap<unsigned, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
- DenseMap<unsigned, SmallSet<unsigned, 4>> PHINodes;
- DenseMap<MachineInstr *, unsigned> PHIMap;
+ SetVector<VRegMaskPair> CrossBlockVRegs;
+ DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
+ DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
+ DenseMap<unsigned, SetVector<VRegMaskPair>> PHINodes;
+ DenseMap<MachineInstr *, VRegMaskPair> PHIMap;
DenseSet<unsigned> DefSeen;
DenseSet<unsigned> Renamed;
@@ -202,10 +204,16 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
void renameVRegs(MachineBasicBlock &MBB,
DenseMap<unsigned, VRegDefStack> VregNames) {
for (auto &PHI : MBB.phis()) {
- Register Res = PHI.getOperand(0).getReg();
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, Res);
+ MachineOperand &Op = PHI.getOperand(0);
+ Register Res = Op.getReg();
+ unsigned SubRegIdx = Op.getSubReg();
+ const TargetRegisterClass *RC =
+ SubRegIdx ? TRI->getSubRegisterClass(
+ TRI->getRegClassForReg(*MRI, Res), SubRegIdx)
+ : TRI->getRegClassForReg(*MRI, Res);
Register NewVReg = MRI->createVirtualRegister(RC);
- PHI.getOperand(0).setReg(NewVReg);
+ Op.setReg(NewVReg);
+ Op.setSubReg(AMDGPU::NoRegister);
VregNames[Res].push_back(
{NewVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, &PHI});
DefSeen.insert(NewVReg);
@@ -249,12 +257,23 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto Succ : successors(&MBB)) {
for (auto &PHI : Succ->phis()) {
- Register VReg = PHIMap[&PHI];
- if (VregNames[VReg].empty()) {
- PHI.addOperand(MachineOperand::CreateReg(VReg, false, false, false,
- false, false));
+ VRegMaskPair VMP = PHIMap[&PHI];
+ // unsigned SubRegIdx = AMDGPU::NoRegister;
+ // const TargetRegisterClass *RC =
+ // TRI->getRegClassForReg(*MRI, VMP.getVReg());
+ // LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
+ // if (VMP.getLaneMask() != FullMask) {
+ // SubRegIdx = getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
+ // }
+ unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
+ if (VregNames[VMP.getVReg()].empty()) {
+ PHI.addOperand(MachineOperand::CreateReg(VMP.getVReg(), false, false,
+ false, false, false, false,
+ SubRegIdx));
} else {
- MachineOperand Op = MachineOperand::CreateReg(VReg, false);
+ MachineOperand Op =
+ MachineOperand::CreateReg(VMP.getVReg(), false, false, false,
+ false, false, false, SubRegIdx);
MachineBasicBlock::iterator IP = MBB.getFirstTerminator();
Op = rewriteUse(Op, IP, MBB, VregNames);
PHI.addOperand(Op);
@@ -291,18 +310,20 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
for (auto &MBB : MF) {
- SetVector<unsigned> Killed;
+ SetVector<VRegMaskPair> Killed;
for (auto &I : MBB) {
for (auto Op : I.uses()) {
- if (Op.isReg() && Op.getReg().isVirtual() &&
- !Killed.contains(Op.getReg())) {
- CrossBlockVRegs.insert(Op.getReg());
+ if (Op.isReg() && Op.getReg().isVirtual()) {
+ VRegMaskPair VMP(Op, TRI, MRI);
+ if (!Killed.contains(VMP))
+ CrossBlockVRegs.insert(VMP);
}
}
for (auto Op : I.defs()) {
if (Op.isReg() && Op.getReg().isVirtual()) {
- Killed.insert(Op.getReg());
- DefBlocks[Op.getReg()].insert(&MBB);
+ VRegMaskPair VMP(Op, TRI, MRI);
+ Killed.insert(VMP);
+ DefBlocks[VMP].insert(&MBB);
}
}
}
@@ -330,44 +351,51 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
collectCrossBlockVRegs(MF);
LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
- for (auto VReg : CrossBlockVRegs) {
- dbgs() << Register::virtReg2Index(VReg) << " ";
+ for (auto VMP
+ : CrossBlockVRegs) {
+ dbgs() << Register::virtReg2Index(VMP.getVReg()) << " ";
} dbgs()
<< "\n");
- for (auto VReg : CrossBlockVRegs) {
+ for (auto VMP : CrossBlockVRegs) {
SmallVector<MachineBasicBlock *> PHIBlocks;
for (auto &MBB : MF) {
- LiveRange &LR = LIS->getInterval(VReg);
+ LiveRange &LR = LIS->getInterval(VMP.getVReg());
if (LIS->isLiveInToMBB(LR, &MBB))
- LiveInBlocks[VReg].insert(&MBB);
+ LiveInBlocks[VMP].insert(&MBB);
}
LLVM_DEBUG(
dbgs() << "findPHINodesPlacement input:\nVreg: "
- << Register::virtReg2Index(VReg) << "\n";
- dbgs() << "Def Blocks: \n"; for (auto MBB : DefBlocks[VReg]) {
+ << Register::virtReg2Index(VMP.getVReg()) << "\n";
+ dbgs() << "Def Blocks: \n"; for (auto MBB
+ : DefBlocks[VMP]) {
dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
} dbgs() << "\nLiveIn Blocks: \n";
- for (auto MBB : LiveInBlocks[VReg]) {
+ for (auto MBB
+ : LiveInBlocks[VMP]) {
dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
} dbgs()
<< "\n");
- findPHINodesPlacement(LiveInBlocks[VReg], DefBlocks[VReg], PHIBlocks);
+ findPHINodesPlacement(LiveInBlocks[VMP], DefBlocks[VMP],
+ PHIBlocks);
LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n";
for (auto MBB : PHIBlocks) {
dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
} dbgs()
<< "\n");
for (auto MBB : PHIBlocks) {
- if (!PHINodes[MBB->getNumber()].contains(VReg)) {
+ if (!PHINodes[MBB->getNumber()].contains(VMP)) {
// Insert PHI for VReg. Don't use new VReg here as we'll replace them
// in renaming phase.
- auto PHINode = BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
- .addReg(VReg, RegState::Define);
- PHINodes[MBB->getNumber()].insert(VReg);
- PHIMap[PHINode] = VReg;
+ unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
+ dbgs() << printReg(VMP.getVReg(), TRI, SubRegIdx) << "\n";
+ auto PHINode =
+ BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
+ .addReg(VMP.getVReg(), RegState::Define, SubRegIdx);
+ PHINodes[MBB->getNumber()].insert(VMP);
+ PHIMap[PHINode] = VMP;
}
}
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 325296ad065cd..6d190ce836da0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -529,6 +529,8 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// set_intersect(Entry.SpillSet, Entry.ActiveSet);
DenseMap<MachineBasicBlock*, RegisterSet> ToSpill;
for (auto Pred : Preds) {
+ if (Pred == &MBB)
+ continue;
auto &PE = getBlockInfo(*Pred);
LLVM_DEBUG(dbgs() << "\nCurr block [ MBB_" << MBB.getNumber() << "."
<< MBB.getName() << " ] Active Set:\n";
@@ -564,6 +566,8 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
for (auto Pred : Preds) {
+ if (Pred == &MBB)
+ continue;
auto &PE = getBlockInfo(*Pred);
LLVM_DEBUG(dbgs() << "\nCurr block [ MBB_" << MBB.getNumber() << "."
<< MBB.getName() << " ] Active Set:\n";
@@ -748,12 +752,13 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
- LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
- unsigned SubRegIdx = VMP.getLaneMask() == FullMask
- ? AMDGPU::NoRegister
- : getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
-
+ // const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
+ // LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
+ // unsigned SubRegIdx = VMP.getLaneMask() == FullMask
+ // ? AMDGPU::NoRegister
+ // : getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.getVReg());
+ unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
int FI = assignVirt2StackSlot(VMP);
TII->storeRegToStackSlot(MBB, InsertBefore, VMP.getVReg(), true, FI, RC, TRI,
VMP.getVReg(), SubRegIdx);
@@ -912,10 +917,10 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
sortRegSetAt(MBB, MBB.getFirstNonPHI(), S);
for (auto VMP : S) {
unsigned RSize = VMP.getSizeInRegs(MRI, TRI);
- if (Size + RSize <= Limit) {
- Active.insert(VMP);
- Size += RSize;
- }
+ if (Size + RSize > Limit)
+ break;
+ Active.insert(VMP);
+ Size += RSize;
}
return Size;
}
diff --git a/llvm/lib/Target/AMDGPU/VRegMaskPair.h b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
index a24d6e06bbcc9..2c261d9899189 100644
--- a/llvm/lib/Target/AMDGPU/VRegMaskPair.h
+++ b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
@@ -27,6 +27,8 @@ class VRegMaskPair {
VRegMaskPair(Register VReg, LaneBitmask LaneMask)
: VReg(VReg), LaneMask(LaneMask) {}
+ VRegMaskPair()
+ : VReg(AMDGPU::NoRegister), LaneMask(LaneBitmask::getNone()) {}
VRegMaskPair(const VRegMaskPair &Other) = default;
VRegMaskPair(VRegMaskPair &&Other) = default;
VRegMaskPair &operator=(const VRegMaskPair &Other) = default;
@@ -49,6 +51,15 @@ class VRegMaskPair {
const Register getVReg() const { return VReg; }
const LaneBitmask getLaneMask() const { return LaneMask; }
+ unsigned getSubReg(const MachineRegisterInfo *MRI,
+ const SIRegisterInfo *TRI) const {
+ const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
+ LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
+ if (LaneMask != Mask)
+ return getSubRegIndexForLaneMask(LaneMask, TRI);
+ return AMDGPU::NoRegister;
+ }
+
const TargetRegisterClass *getRegClass(const MachineRegisterInfo *MRI,
const SIRegisterInfo *TRI) const {
@@ -56,7 +67,8 @@ class VRegMaskPair {
LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
if (LaneMask != Mask) {
unsigned SubRegIdx = getSubRegIndexForLaneMask(LaneMask, TRI);
- RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
+ return TRI->getSubRegisterClass(RC, SubRegIdx);
}
return RC;
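
VRegMaskPair::getSubReg added above maps a (register, lane-mask) pair back to a sub-register index only when the mask is narrower than the register's full mask. A small standalone model of that rule, with a hypothetical mask-to-index table instead of the real TRI queries:

```cpp
// Minimal model of VRegMaskPair::getSubReg; the mask/index table is
// illustrative, not the AMDGPU sub-register index set.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct VRegMaskPair {
  unsigned VReg;
  uint64_t LaneMask;

  std::string getSubReg(uint64_t FullMask,
                        const std::map<uint64_t, std::string> &IdxForMask) const {
    if (LaneMask == FullMask)
      return "";                       // whole register: no sub-register index
    auto It = IdxForMask.find(LaneMask);
    return It != IdxForMask.end() ? It->second : "<no single index>";
  }
};

int main() {
  std::map<uint64_t, std::string> IdxForMask = {{0x1, "sub0"}, {0x2, "sub1"},
                                                {0x3, "sub0_sub1"}};
  VRegMaskPair Whole{1, 0x3}, Half{1, 0x2};
  std::cout << "whole: '" << Whole.getSubReg(0x3, IdxForMask) << "'\n"; // ''
  std::cout << "half:  '" << Half.getSubReg(0x3, IdxForMask) << "'\n";  // 'sub1'
}
```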
>From 215f793b3f209b39cc049718380fef1c5c1b4f0f Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 11 Jul 2025 15:51:05 +0000
Subject: [PATCH 37/46] SSA Spiller WIP 11.07.25
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 88 +++++++++------------
1 file changed, 38 insertions(+), 50 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 6d190ce836da0..7a977de995340 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -137,15 +137,14 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
void initActiveSetUsualBlock(MachineBasicBlock &MBB);
void initActiveSetLoopHeader(MachineBasicBlock &MBB);
- Register reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP,
- MachineInstr *ReloadMI);
+ Register reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
void spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP);
Register reloadBefore(MachineBasicBlock::iterator InsertBefore,
- VRegMaskPair VMP, MachineInstr *&ReloadMI);
+ VRegMaskPair VMP);
void spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore, VRegMaskPair VMP);
- void rewriteUses(MachineInstr &I, Register OldVReg, Register NewVReg);
+ void rewriteUses(Register OldVReg, Register NewVReg);
unsigned getLoopMaxRP(MachineLoop *L);
// Returns number of spilled VRegs
@@ -304,12 +303,23 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
auto B = I->getOperand(++OpNo);
assert(B.isMBB());
MachineBasicBlock *ValueSrc = B.getMBB();
-
+
if (ProcessedBlocks.contains(ValueSrc->getNumber())) {
auto Info = getBlockInfo(*ValueSrc);
- dumpRegSet(Info.ActiveSet);
- assert(getBlockInfo(*ValueSrc).ActiveSet.contains(VMP) &&
- "PHI node input value is not live out predecessor!");
+ auto SrcActive = Info.ActiveSet;
+ auto SrcSpill = Info.SpillSet;
+ dumpRegSet(SrcActive);
+ dumpRegSet(SrcSpill);
+ assert((SrcActive.contains(VMP) || SrcSpill.contains(VMP)) &&
+ "PHI node input value is neither live out predecessor no "
+ "spilled!");
+ if (SrcSpill.contains(VMP)) {
+ // reload it at the end of the source block
+ Register NewVreg = reloadAtEnd(*ValueSrc, VMP);
+ VRegMaskPair NewVMP(NewVreg, VMP.getLaneMask());
+ rewriteUses(VMP.getVReg(), NewVreg);
+ Active.insert(NewVMP);
+ }
}
continue;
}
@@ -377,10 +387,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
for (auto R : Reloads) {
LLVM_DEBUG(dbgs() << "\nReloading "; printVRegMaskPair(R);
dbgs() << "\n");
- MachineInstr *ReloadMI = nullptr;
- Register NewVReg = reloadBefore(I, R, ReloadMI);
- assert(ReloadMI && "NULL returned from reloadBefore\n");
- rewriteUses(*ReloadMI, R.getVReg(), NewVReg);
+ Register NewVReg = reloadBefore(I, R);
+ rewriteUses(R.getVReg(), NewVReg);
}
std::advance(I, NSpills);
@@ -422,30 +430,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
if (ValueSrc->getNumber() == MBB.getNumber()) {
VRegMaskPair VMP(U, TRI, MRI);
if (!isCoveredActive(VMP, Active)) {
- MachineInstr *ReloadMI = nullptr;
- Register NewVReg = reloadAtEnd(MBB, VMP, ReloadMI);
- // U.setReg(NewVReg);
- // U.setSubReg(AMDGPU::NoRegister);
-
- // The code below is commented out because of the BUG in
- // MachineSSAUpdater. In case the register class of a PHI operand
- // defined register is a superclass of a NewReg it inserts a COPY
- // AFTER the PHI
-
- // Predecessor:
- // %157:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0
-
- // %146:vreg_64 = PHI %70:vreg_64.sub0, %bb.3, %144:vgpr_32, %bb.1
-
- // becomes:
-
- // %146:vreg_64 = PHI %158:vreg_64.sub0, %bb.3, %144:vgpr_32,
- // %bb.1 %158:vreg_64 = COPY %157
-
- MachineSSAUpdater SSAUpddater(*MBB.getParent());
- SSAUpddater.Initialize(U.getReg());
- SSAUpddater.AddAvailableValue(&MBB, NewVReg);
- SSAUpddater.RewriteUse(U);
+ Register NewVReg = reloadAtEnd(MBB, VMP);
+ rewriteUses(VMP.getVReg(), NewVReg);
}
}
}
@@ -594,9 +580,8 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
// is not live across the edge.
for (auto R : ReloadInPred) {
- MachineInstr *ReloadMI = nullptr;
- Register NewVReg = reloadAtEnd(*Pred, R, ReloadMI);
- rewriteUses(*ReloadMI, R.getVReg(), NewVReg);
+ Register NewVReg = reloadAtEnd(*Pred, R);
+ rewriteUses(R.getVReg(), NewVReg);
}
}
}
@@ -721,9 +706,8 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
dumpRegSet(getBlockInfo(MBB).ActiveSet));
}
-Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP,
- MachineInstr *ReloadMI) {
- return reloadBefore(*MBB.getFirstInstrTerminator(), VMP, ReloadMI);
+Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
+ return reloadBefore(*MBB.getFirstInstrTerminator(), VMP);
}
void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
@@ -732,14 +716,13 @@ void AMDGPUSSASpiller::spillAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
Register
AMDGPUSSASpiller::reloadBefore(MachineBasicBlock::iterator InsertBefore,
- VRegMaskPair VMP, MachineInstr *&ReloadMI) {
+ VRegMaskPair VMP) {
MachineBasicBlock *MBB = InsertBefore->getParent();
const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
int FI = getStackSlot(VMP);
Register NewVReg = MRI->createVirtualRegister(RC);
TII->loadRegFromStackSlot(*MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
- // FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
- ReloadMI = &*(--InsertBefore);
+ MachineInstr *ReloadMI = MRI->getVRegDef(NewVReg);
LIS.InsertMachineInstrInMaps(*ReloadMI);
LIS.createAndComputeVirtRegInterval(NewVReg);
@@ -772,15 +755,20 @@ void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
SpillPoints[VMP] = &Spill;
}
-void AMDGPUSSASpiller::rewriteUses(MachineInstr &I, Register OldVReg,
+void AMDGPUSSASpiller::rewriteUses(Register OldVReg,
Register NewVReg) {
- MachineSSAUpdater SSAUpdater(*I.getParent()->getParent());
+ MachineInstr *DefMI = MRI->getVRegDef(NewVReg);
+ assert(DefMI);
+ MachineBasicBlock *ReloadBB = DefMI->getParent();
+ MachineFunction *MF = ReloadBB->getParent();
+
+ MachineSSAUpdater SSAUpdater(*MF);
SSAUpdater.Initialize(OldVReg);
- SSAUpdater.AddAvailableValue(I.getParent(), NewVReg);
+ SSAUpdater.AddAvailableValue(ReloadBB, NewVReg);
for (auto &U : MRI->use_operands(OldVReg)) {
MachineInstr *UseMI = U.getParent();
- if (MDT.dominates(&I, UseMI)) {
- if (I.getParent() == UseMI->getParent()) {
+ if (MDT.dominates(DefMI, UseMI)) {
+ if (ReloadBB == UseMI->getParent()) {
// If the use is in the same block, just rewrite it.
U.setReg(NewVReg);
U.setSubReg(AMDGPU::NoRegister);
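
After this change the reload instruction is recovered as the unique SSA definition of the new register, and uses are rewritten only when the reload dominates them: same-block uses are renamed directly, cross-block uses go through MachineSSAUpdater so PHIs can be placed at joins. A toy model of that decision, with dominance supplied as a callback rather than MachineDominatorTree:

```cpp
// Standalone sketch of the rewriteUses policy; Use and the dominance
// callback are simplified stand-ins for MachineOperand/MachineDominatorTree.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Use { std::string Block; bool RewrittenDirectly = false, ViaSSAUpdater = false; };

void rewriteUses(const std::string &ReloadBlock, std::vector<Use> &Uses,
                 const std::function<bool(const std::string &)> &DominatedByReload) {
  for (Use &U : Uses) {
    if (!DominatedByReload(U.Block))
      continue;                   // the old value still reaches this use
    if (U.Block == ReloadBlock)
      U.RewrittenDirectly = true; // same block: just substitute the register
    else
      U.ViaSSAUpdater = true;     // other block: may need a PHI on joins
  }
}

int main() {
  std::vector<Use> Uses = {{"bb.2"}, {"bb.3"}, {"bb.0"}};
  rewriteUses("bb.2", Uses, [](const std::string &B) { return B != "bb.0"; });
  for (const Use &U : Uses)
    std::cout << U.Block << ": "
              << (U.RewrittenDirectly ? "direct"
                  : U.ViaSSAUpdater   ? "ssa-updater"
                                      : "untouched")
              << "\n";
}
```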
>From 0d893c49326587f09904fb3312a7a2b32657628a Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Tue, 15 Jul 2025 12:00:50 +0000
Subject: [PATCH 38/46] Rebuild SSA: CFG walk order fixed. PHI node result
 LaneMask fixed. RegisterSet.contains() changed to isCoveredByRegSet()
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 133 +++++++++++++-------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 11 +-
2 files changed, 95 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 9970a702bf50d..1d4391c305864 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -38,6 +38,20 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
using VRegDefStack = std::vector<CurVRegInfo>;
+ void printVRegDefStack(VRegDefStack VregDefs) {
+ VRegDefStack::reverse_iterator It = VregDefs.rbegin();
+ dbgs() << "\n####################################\n";
+ for (; It != VregDefs.rend(); ++It) {
+ CurVRegInfo VRInfo = *It;
+ dbgs() << printReg(VRInfo.CurName, TRI, VRInfo.PrevSubRegIdx) << "\n";
+ MachineInstr *DefMI = VRInfo.DefMI;
+ dbgs() << "DefMI: " << *DefMI << "\n";
+ LaneBitmask DefMask = VRInfo.PrevMask;
+ dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ }
+ dbgs() << "####################################\n";
+ }
+
SetVector<VRegMaskPair> CrossBlockVRegs;
DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
@@ -45,6 +59,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
DenseMap<MachineInstr *, VRegMaskPair> PHIMap;
DenseSet<unsigned> DefSeen;
DenseSet<unsigned> Renamed;
+ DenseSet<unsigned> Visited;
void collectCrossBlockVRegs(MachineFunction &MF);
void findPHINodesPlacement(const SmallPtrSetImpl<MachineBasicBlock *> &LiveInBlocks,
@@ -191,10 +206,9 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
assert(CurVReg != AMDGPU::NoRegister &&
"Use is not dominated by definition!\n");
- dbgs() << "Rewriting use: " << Op << " to "
- << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
-
if (RewriteOp) {
+ dbgs() << "Rewriting use: " << Op << " to "
+ << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
Op.setReg(CurVReg);
Op.setSubReg(SubRegIdx);
}
@@ -203,9 +217,13 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
void renameVRegs(MachineBasicBlock &MBB,
DenseMap<unsigned, VRegDefStack> VregNames) {
+ if (Visited.contains(MBB.getNumber()))
+ return;
+
for (auto &PHI : MBB.phis()) {
MachineOperand &Op = PHI.getOperand(0);
Register Res = Op.getReg();
+ printVRegDefStack(VregNames[Res]);
unsigned SubRegIdx = Op.getSubReg();
const TargetRegisterClass *RC =
SubRegIdx ? TRI->getSubRegisterClass(
@@ -214,8 +232,9 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
Register NewVReg = MRI->createVirtualRegister(RC);
Op.setReg(NewVReg);
Op.setSubReg(AMDGPU::NoRegister);
- VregNames[Res].push_back(
- {NewVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, &PHI});
+ VregNames[Res].push_back({NewVReg, TRI->getSubRegIndexLaneMask(SubRegIdx),
+ AMDGPU::NoRegister, &PHI});
+ printVRegDefStack(VregNames[Res]);
DefSeen.insert(NewVReg);
Renamed.insert(Res);
}
@@ -239,6 +258,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
VregNames[VReg].push_back({NewVReg,
getOperandLaneMask(Op, TRI, MRI),
Op.getSubReg(), &I});
+ printVRegDefStack(VregNames[VReg]);
+
Op.ChangeToRegister(NewVReg, true, false, false, false, false);
Op.setSubReg(AMDGPU::NoRegister);
@@ -249,22 +270,20 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
} else {
VregNames[VReg].push_back(
{VReg, getOperandLaneMask(Op, TRI, MRI), Op.getSubReg(), &I});
+ printVRegDefStack(VregNames[VReg]);
+
DefSeen.insert(VReg);
}
}
}
}
+ Visited.insert(MBB.getNumber());
+
for (auto Succ : successors(&MBB)) {
for (auto &PHI : Succ->phis()) {
VRegMaskPair VMP = PHIMap[&PHI];
- // unsigned SubRegIdx = AMDGPU::NoRegister;
- // const TargetRegisterClass *RC =
- // TRI->getRegClassForReg(*MRI, VMP.getVReg());
- // LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
- // if (VMP.getLaneMask() != FullMask) {
- // SubRegIdx = getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
- // }
+
unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
if (VregNames[VMP.getVReg()].empty()) {
PHI.addOperand(MachineOperand::CreateReg(VMP.getVReg(), false, false,
@@ -280,14 +299,12 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
PHI.addOperand(MachineOperand::CreateMBB(&MBB));
}
+ renameVRegs(*Succ, VregNames);
}
- // recurse to the succs in DomTree
- DomTreeNodeBase<MachineBasicBlock> *Node = MDT->getNode(&MBB);
- for (auto *Child : Node->children()) {
- MachineBasicBlock *ChildMBB = Child->getBlock();
- // Process child in the dominator tree
- renameVRegs(*ChildMBB, VregNames);
- }
+ }
+
+ Printable printVMP(VRegMaskPair VMP) {
+ return printReg(VMP.getVReg(), TRI, VMP.getSubReg(MRI, TRI));
}
public:
@@ -304,17 +321,28 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
AU.addRequired<LiveIntervalsWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
-};
+ };
} // end anonymous namespace
void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
for (auto &MBB : MF) {
SetVector<VRegMaskPair> Killed;
+ SetVector<VRegMaskPair> Defined;
for (auto &I : MBB) {
for (auto Op : I.uses()) {
if (Op.isReg() && Op.getReg().isVirtual()) {
VRegMaskPair VMP(Op, TRI, MRI);
+ if (!Killed.contains(VMP))
+ for (auto V : Defined) {
+ if (V.getVReg() == VMP.getVReg()) {
+ if ((V.getLaneMask() & VMP.getLaneMask()) ==
+ VMP.getLaneMask()) {
+ Killed.insert(VMP);
+ break;
+ }
+ }
+ }
if (!Killed.contains(VMP))
CrossBlockVRegs.insert(VMP);
}
@@ -322,7 +350,7 @@ void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
for (auto Op : I.defs()) {
if (Op.isReg() && Op.getReg().isVirtual()) {
VRegMaskPair VMP(Op, TRI, MRI);
- Killed.insert(VMP);
+ Defined.insert(VMP);
DefBlocks[VMP].insert(&MBB);
}
}
@@ -343,6 +371,7 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
PHINodes.clear();
DefSeen.clear();
Renamed.clear();
+ Visited.clear();
// Collect all cross-block virtual registers.
// This includes registers that are live-in to the function, and registers
@@ -351,49 +380,65 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
collectCrossBlockVRegs(MF);
LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
- for (auto VMP
- : CrossBlockVRegs) {
- dbgs() << Register::virtReg2Index(VMP.getVReg()) << " ";
- } dbgs()
- << "\n");
+ for (auto VMP : CrossBlockVRegs) { dbgs() << printVMP(VMP) << " "; });
for (auto VMP : CrossBlockVRegs) {
SmallVector<MachineBasicBlock *> PHIBlocks;
- for (auto &MBB : MF) {
- LiveRange &LR = LIS->getInterval(VMP.getVReg());
- if (LIS->isLiveInToMBB(LR, &MBB))
- LiveInBlocks[VMP].insert(&MBB);
+ LiveInterval &LI = LIS->getInterval(VMP.getVReg());
+ if (LI.hasSubRanges()) {
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+ LaneBitmask Mask = SR.LaneMask;
+ if ((Mask & VMP.getLaneMask()) == VMP.getLaneMask()) {
+ for (auto &MBB : MF) {
+ if (SR.liveAt(LIS->getMBBStartIdx(&MBB)))
+ LiveInBlocks[VMP].insert(&MBB);
+ }
+ }
+ }
+ } else {
+ for (auto &MBB : MF) {
+ if (LI.liveAt(LIS->getMBBStartIdx(&MBB)))
+ LiveInBlocks[VMP].insert(&MBB);
+ }
+ }
+
+ SmallPtrSet<MachineBasicBlock *, 8> Defs;
+ for(auto E : DefBlocks) {
+ auto V = E.first;
+ if (V.getVReg() == VMP.getVReg()) {
+ if ((V.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask()) {
+ Defs.insert(E.second.begin(), E.second.end());
+ }
+ }
}
LLVM_DEBUG(
dbgs() << "findPHINodesPlacement input:\nVreg: "
- << Register::virtReg2Index(VMP.getVReg()) << "\n";
+ << printVMP(VMP)
+ << "\n";
dbgs() << "Def Blocks: \n"; for (auto MBB
- : DefBlocks[VMP]) {
- dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
+ : Defs) {
+ dbgs() << "MBB_" << MBB->getNumber() << " ";
} dbgs() << "\nLiveIn Blocks: \n";
for (auto MBB
: LiveInBlocks[VMP]) {
- dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
+ dbgs() << "MBB_" << MBB->getNumber() << " ";
} dbgs()
<< "\n");
- findPHINodesPlacement(LiveInBlocks[VMP], DefBlocks[VMP],
- PHIBlocks);
- LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n";
- for (auto MBB : PHIBlocks) {
- dbgs() << MBB->getName() << "." << MBB->getNumber() << " ";
- } dbgs()
- << "\n");
+ findPHINodesPlacement(LiveInBlocks[VMP], Defs, PHIBlocks);
+ LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n"; for (auto MBB
+ : PHIBlocks) {
+ dbgs() << "MBB_" << MBB->getNumber() << " ";
+ } dbgs() << "\n");
for (auto MBB : PHIBlocks) {
if (!PHINodes[MBB->getNumber()].contains(VMP)) {
// Insert PHI for VReg. Don't use new VReg here as we'll replace them
// in renaming phase.
- unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
- dbgs() << printReg(VMP.getVReg(), TRI, SubRegIdx) << "\n";
+ printVMP(VMP);
auto PHINode =
BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
- .addReg(VMP.getVReg(), RegState::Define, SubRegIdx);
+ .addReg(VMP.getVReg(), RegState::Define, VMP.getSubReg(MRI, TRI));
PHINodes[MBB->getNumber()].insert(VMP);
PHIMap[PHINode] = VMP;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 7a977de995340..71140b6695b0b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -187,7 +187,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity = 0);
- bool isCoveredActive(VRegMaskPair VMP, const RegisterSet Active);
+ bool isCoveredByRegSet(VRegMaskPair VMP, const RegisterSet Active);
public:
AMDGPUSSASpiller(LiveIntervals &LIS, MachineLoopInfo &LI,
@@ -310,7 +310,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
auto SrcSpill = Info.SpillSet;
dumpRegSet(SrcActive);
dumpRegSet(SrcSpill);
- assert((SrcActive.contains(VMP) || SrcSpill.contains(VMP)) &&
+ assert((isCoveredByRegSet(VMP, SrcActive) ||
+ isCoveredByRegSet(VMP, SrcSpill)) &&
"PHI node input value is neither live out predecessor no "
"spilled!");
if (SrcSpill.contains(VMP)) {
@@ -324,7 +325,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
continue;
}
- if (!isCoveredActive(VMP, Active)) {
+ if (!isCoveredByRegSet(VMP, Active)) {
// Not in reg, hence, should have been spilled before
// FIXME: This is ODD as the Spilled set is a union among all
// predecessors and should already contain all spilled before!
@@ -429,7 +430,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
MachineBasicBlock *ValueSrc = B.getMBB();
if (ValueSrc->getNumber() == MBB.getNumber()) {
VRegMaskPair VMP(U, TRI, MRI);
- if (!isCoveredActive(VMP, Active)) {
+ if (!isCoveredByRegSet(VMP, Active)) {
Register NewVReg = reloadAtEnd(MBB, VMP);
rewriteUses(VMP.getVReg(), NewVReg);
}
@@ -913,7 +914,7 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
return Size;
}
-bool AMDGPUSSASpiller::isCoveredActive(VRegMaskPair VMP,
+bool AMDGPUSSASpiller::isCoveredByRegSet(VRegMaskPair VMP,
const RegisterSet Active) {
// printVRegMaskPair(VMP);
// dumpRegSet(Active);
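
isCoveredByRegSet asks whether a (vreg, lane-mask) query is already represented in an active or spill set. The sketch below models one plausible reading: the union of lanes the set holds for that vreg must contain every queried lane. Whether the real check requires a single covering entry or a union of entries is a detail of the pass, so treat this purely as an illustration.

```cpp
// Toy model of a lane-mask coverage query; VRegMaskPair here is a
// simplified stand-in for the pass's type.
#include <cstdint>
#include <iostream>
#include <vector>

struct VRegMaskPair { unsigned VReg; uint64_t LaneMask; };

bool isCoveredByRegSet(const VRegMaskPair &VMP,
                       const std::vector<VRegMaskPair> &Set) {
  uint64_t Held = 0;
  for (const VRegMaskPair &P : Set)
    if (P.VReg == VMP.VReg)
      Held |= P.LaneMask;             // lanes of this vreg present in the set
  return (VMP.LaneMask & ~Held) == 0; // no queried lane is missing
}

int main() {
  std::vector<VRegMaskPair> Active = {{1, 0x3}, {2, 0x1}};
  std::cout << isCoveredByRegSet({1, 0x2}, Active) << "\n"; // 1: sub-lane held
  std::cout << isCoveredByRegSet({2, 0x3}, Active) << "\n"; // 0: a lane is missing
}
```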
>From a6cee6d2c612ee1be7e7735952587d6aaebc7f3d Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Wed, 16 Jul 2025 13:56:05 +0000
Subject: [PATCH 39/46] Rebuild SSA refactoring: getFullMaskForRC changed to
MRI->getMaxLaneMaskForVReg, VRegMaskPair::getSizeInRegs changed to use
TRI->getNumCoveredRegs(LaneMask)
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 8 ++---
llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h | 18 +---------
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 39 +++++----------------
llvm/lib/Target/AMDGPU/VRegMaskPair.h | 33 ++++++-----------
4 files changed, 25 insertions(+), 73 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 1d4391c305864..b5b2d14c03b00 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -191,16 +191,16 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
// All subreg defs are found. Insert REG_SEQUENCE.
auto *RC = TRI->getRegClassForReg(*MRI, VReg);
CurVReg = MRI->createVirtualRegister(RC);
- auto RS = BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE),
- CurVReg);
+ auto RS = BuildMI(MBB, I, I->getDebugLoc(),
+ TII->get(AMDGPU::REG_SEQUENCE), CurVReg);
for (auto O : RegSeqOps) {
auto [R, SrcSubreg, DstSubreg] = O;
RS.addReg(R, 0, SrcSubreg);
RS.addImm(DstSubreg);
}
- VregNames[VReg].push_back(
- {CurVReg, getFullMaskForRC(*RC, TRI), AMDGPU::NoRegister, RS});
+ VregNames[VReg].push_back({CurVReg, MRI->getMaxLaneMaskForVReg(CurVReg),
+ AMDGPU::NoRegister, RS});
}
assert(CurVReg != AMDGPU::NoRegister &&
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
index bf8bb728e5f08..0bb163eed59a9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSARAUtils.h
@@ -17,22 +17,6 @@
using namespace llvm;
-inline LaneBitmask getFullMaskForRC(const TargetRegisterClass &RC,
- const SIRegisterInfo *TRI) {
- unsigned Size = TRI->getRegSizeInBits(RC);
- uint64_t IntMask = LaneBitmask::getAll().getAsInteger();
- return LaneBitmask(IntMask >> (LaneBitmask::BitWidth - Size / 16));
-}
-
-inline LaneBitmask getFullMaskForRegOp(const MachineOperand &MO,
- const SIRegisterInfo *TRI,
- MachineRegisterInfo *MRI) {
- assert(MO.isReg() && MO.getReg().isVirtual() &&
- "Error: MachineOperand must be a virtual register!\n");
- const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, MO);
- return getFullMaskForRC(*RC, TRI);
-}
-
inline LaneBitmask getOperandLaneMask(const MachineOperand &MO,
const SIRegisterInfo *TRI,
MachineRegisterInfo *MRI) {
@@ -40,7 +24,7 @@ inline LaneBitmask getOperandLaneMask(const MachineOperand &MO,
"Error: Only virtual register allowed!\n");
if (MO.getSubReg())
return TRI->getSubRegIndexLaneMask(MO.getSubReg());
- return getFullMaskForRegOp(MO, TRI, MRI);
+ return MRI->getMaxLaneMaskForVReg(MO.getReg());
}
inline unsigned getSubRegIndexForLaneMask(LaneBitmask Mask,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 71140b6695b0b..6ef083a164458 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -223,8 +223,7 @@ AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
LLVM_ATTRIBUTE_NOINLINE void
AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, P.getVReg());
- LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
+ LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(P.getVReg());
dbgs() << "Vreg: [";
if (P.getLaneMask() == FullMask) {
dbgs() << printReg(P.getVReg()) << "] ";
@@ -314,7 +313,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
isCoveredByRegSet(VMP, SrcSpill)) &&
"PHI node input value is neither live out predecessor no "
"spilled!");
- if (SrcSpill.contains(VMP)) {
+ if (isCoveredByRegSet(VMP, SrcSpill)) {
// reload it at the end of the source block
Register NewVreg = reloadAtEnd(*ValueSrc, VMP);
VRegMaskPair NewVMP(NewVreg, VMP.getLaneMask());
@@ -634,23 +633,9 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
Register VReg = Register::index2VirtReg(i);
if (!LIS.hasInterval(VReg))
continue;
-
+
if (takeReg(VReg) && LIS.isLiveInToMBB(LIS.getInterval(VReg), &MBB)) {
- // we have to take care ofthe subreg index and set LaneMask accordingly
- // LaneBitmask LaneMask = LaneBitmask::getAll();
- // RegisterSet Preds;
- // for (auto Pred : MBB.predecessors()) {
- // auto PredActive = getBlockInfo(*Pred).ActiveSet;
- // set_intersect()
- // for (auto P : PredActive) {
- // if (P.VReg == VReg) {
- // LaneMask = P.LaneMask;
- // break;
- // }
- // }
- // }
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- LiveIn.insert(VRegMaskPair(VReg, getFullMaskForRC(*RC, TRI)));
+ LiveIn.insert(VRegMaskPair(VReg, MRI->getMaxLaneMaskForVReg(VReg)));
}
}
@@ -728,19 +713,13 @@ AMDGPUSSASpiller::reloadBefore(MachineBasicBlock::iterator InsertBefore,
LIS.createAndComputeVirtRegInterval(NewVReg);
auto &Entry = getBlockInfo(*MBB);
- Entry.ActiveSet.insert({NewVReg, getFullMaskForRC(*RC, TRI)});
+ Entry.ActiveSet.insert({NewVReg, MRI->getMaxLaneMaskForVReg(NewVReg)});
return NewVReg;
}
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
-
- // const TargetRegisterClass *RC = VMP.getRegClass(MRI, TRI);
- // LaneBitmask FullMask = getFullMaskForRC(*RC, TRI);
- // unsigned SubRegIdx = VMP.getLaneMask() == FullMask
- // ? AMDGPU::NoRegister
- // : getSubRegIndexForLaneMask(VMP.getLaneMask(), TRI);
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.getVReg());
unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
int FI = assignVirt2StackSlot(VMP);
@@ -825,7 +804,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
while (CurRP > Limit) {
auto P = Active.pop_back_val();
- unsigned RegSize = P.getSizeInRegs(MRI, TRI);
+ unsigned RegSize = P.getSizeInRegs(TRI);
unsigned SizeToSpill = CurRP - Limit;
if (RegSize > SizeToSpill) {
@@ -836,7 +815,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
: NU.getSortedSubregUses(I, P);
for (auto S : Sorted) {
- unsigned Size = S.getSizeInRegs(MRI, TRI);
+ unsigned Size = S.getSizeInRegs(TRI);
CurRP -= Size;
if (!Spilled.contains(S))
ToSpill.insert(S);
@@ -893,7 +872,7 @@ unsigned AMDGPUSSASpiller::getRegSetSizeInRegs(const RegisterSet VRegs) {
for (auto &VMP : VRegs) {
printVRegMaskPair(VMP);
dbgs() << "\n";
- Size += VMP.getSizeInRegs(MRI, TRI);
+ Size += VMP.getSizeInRegs(TRI);
}
return Size;
}
@@ -905,7 +884,7 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Size = Capacity ? 0 : getRegSetSizeInRegs(Active);
sortRegSetAt(MBB, MBB.getFirstNonPHI(), S);
for (auto VMP : S) {
- unsigned RSize = VMP.getSizeInRegs(MRI, TRI);
+ unsigned RSize = VMP.getSizeInRegs(TRI);
if (Size + RSize > Limit)
break;
Active.insert(VMP);
diff --git a/llvm/lib/Target/AMDGPU/VRegMaskPair.h b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
index 2c261d9899189..561c5d0824ce0 100644
--- a/llvm/lib/Target/AMDGPU/VRegMaskPair.h
+++ b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
@@ -37,15 +37,10 @@ class VRegMaskPair {
VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI,
const MachineRegisterInfo *MRI) {
assert(MO.isReg() && "Not a register operand!");
- Register R = MO.getReg();
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, R);
- assert(R.isVirtual() && "Not a virtual register!");
- VReg = R;
- LaneMask = getFullMaskForRC(*RC, TRI);
- unsigned subRegIndex = MO.getSubReg();
- if (subRegIndex) {
- LaneMask = TRI->getSubRegIndexLaneMask(subRegIndex);
- }
+ assert(MO.getReg().isVirtual() && "Not a virtual register!");
+ VReg = MO.getReg();
+ LaneMask = MO.getSubReg() ? TRI->getSubRegIndexLaneMask(MO.getSubReg())
+ : MRI->getMaxLaneMaskForVReg(VReg);
}
const Register getVReg() const { return VReg; }
@@ -53,31 +48,25 @@ class VRegMaskPair {
unsigned getSubReg(const MachineRegisterInfo *MRI,
const SIRegisterInfo *TRI) const {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
- if (LaneMask != Mask)
- return getSubRegIndexForLaneMask(LaneMask, TRI);
- return AMDGPU::NoRegister;
+ LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(VReg);
+ if (LaneMask == Mask)
+ return AMDGPU::NoRegister;
+ return getSubRegIndexForLaneMask(LaneMask, TRI);
}
const TargetRegisterClass *getRegClass(const MachineRegisterInfo *MRI,
const SIRegisterInfo *TRI) const {
-
const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
- LaneBitmask Mask = getFullMaskForRC(*RC, TRI);
+ LaneBitmask Mask = MRI->getMaxLaneMaskForVReg(VReg);
if (LaneMask != Mask) {
unsigned SubRegIdx = getSubRegIndexForLaneMask(LaneMask, TRI);
- // RC = TRI->getSubRegisterClass(RC, SubRegIdx);
return TRI->getSubRegisterClass(RC, SubRegIdx);
}
-
return RC;
}
- unsigned getSizeInRegs(const MachineRegisterInfo *MRI,
- const SIRegisterInfo *TRI) const {
- const TargetRegisterClass *RC = getRegClass(MRI, TRI);
- return TRI->getRegClassWeight(RC).RegWeight;
+ unsigned getSizeInRegs(const SIRegisterInfo *TRI) const {
+ return TRI->getNumCoveredRegs(LaneMask);
}
bool operator==(const VRegMaskPair &other) const {
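
With this patch the register count of a VRegMaskPair is derived from the lane mask alone. A standalone model of that computation, assuming the AMDGPU convention that each 32-bit register contributes an adjacent lo16/hi16 pair of lane bits:

```cpp
// Toy model of getSizeInRegs; the lo16/hi16 pairing of lane bits is an
// assumption about the lane-mask layout, not a quote of the real helper.
#include <bit>
#include <cstdint>
#include <iostream>

unsigned numCoveredRegs(uint64_t LaneMask) {
  // Fold each hi16 bit onto its lo16 neighbour, then count the register
  // slots (lane-bit pairs) that have at least one lane set.
  uint64_t Folded = (LaneMask | (LaneMask >> 1)) & 0x5555555555555555ULL;
  return std::popcount(Folded);
}

int main() {
  std::cout << numCoveredRegs(0x3) << "\n"; // 1: both lanes of one register
  std::cout << numCoveredRegs(0xC) << "\n"; // 1: both lanes of the next register
  std::cout << numCoveredRegs(0xF) << "\n"; // 2: two full 32-bit registers
}
```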
>From add8ef6633f130f3c0de03d97d71a1a36f09f26e Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 24 Jul 2025 14:11:07 +0000
Subject: [PATCH 40/46] SSA Spiller: VRegMaskPairSet container added and the
whole logic refactored to use it
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 3 +
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 72 ++--
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 5 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 119 +++---
llvm/lib/Target/AMDGPU/VRegMaskPair.h | 349 ++++++++++++++++--
llvm/unittests/CodeGen/CMakeLists.txt | 10 +-
llvm/unittests/CodeGen/VRegMaskPairTest.cpp | 178 +++++++++
7 files changed, 601 insertions(+), 135 deletions(-)
create mode 100644 llvm/unittests/CodeGen/VRegMaskPairTest.cpp
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 07c6fc082141b..69c1e7b2f6ab5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -159,6 +159,9 @@ void NextUseResult::analyze(const MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "\nFinal distances for MBB_" << MBB->getNumber()
<< "." << MBB->getName() << "\n";
printVregDistances(Curr));
+ LLVM_DEBUG(dbgs() << "\nPrevious distances for MBB_" << MBB->getNumber()
+ << "." << MBB->getName() << "\n";
+ printVregDistances(Prev));
UpwardNextUses[MBBNum] = std::move(Curr);
bool Changed4MBB = (Prev != UpwardNextUses[MBBNum]);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index 63cab36e1c3ba..ca9b3f837ef85 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -43,9 +43,14 @@ class NextUseResult {
using Record = std::pair<LaneBitmask, unsigned>;
struct CompareByDist {
- bool operator()(const Record &LHS, const Record &RHS) {
- return LHS.second > RHS.second;
- };
+ bool operator()(const Record &LHS, const Record &RHS) const {
+ if (LHS.first ==
+ RHS.first) // Same LaneBitmask → prefer furthest distance
+ return LHS.second > RHS.second;
+ return LHS.first.getAsInteger() <
+ RHS.first.getAsInteger(); // Otherwise sort by LaneBitmask so
+ // that smaller Mask first
+ }
};
using SortedRecords = std::set<Record, CompareByDist>;
@@ -149,38 +154,27 @@ class NextUseResult {
}
void merge(const VRegDistances &Other, unsigned Weight = 0) {
- for (auto P : Other) {
+ for (const auto &P : Other) {
unsigned Key = P.getFirst();
- auto Dists = P.getSecond();
-
- if (NextUseMap.contains(Key)) {
- auto &MineDists = NextUseMap[Key];
- // Merge it!
- for (auto D : Dists) {
- if (!MineDists.contains(D)) {
- // We have a subreg use to merge in.
- bool Exists = false;
- for (auto D1 : MineDists) {
- if (D1.first == D.first) {
- Exists = true;
- if (D1.second > D.second + Weight) {
- // We have a closer use of the same reg and mask.
- // Erase the existing one.
- MineDists.erase(D1);
- MineDists.insert({D.first, D.second + Weight});
- }
- break;
- }
- }
- if (!Exists)
- // Insert a new one.
- MineDists.insert({D.first, D.second + Weight});
- }
+ const auto &OtherDists = P.getSecond();
+ auto &MineDists = NextUseMap[Key]; // creates empty if not present
+
+ for (const auto &D : OtherDists) {
+ Record Adjusted = {D.first, D.second + Weight};
+
+ // Try to find existing record with the same LaneBitmask
+ auto It =
+ std::find_if(MineDists.begin(), MineDists.end(),
+ [&](const Record &R) { return R.first == D.first; });
+
+ if (It == MineDists.end()) {
+ // No record → insert
+ MineDists.insert(Adjusted);
+ } else if (It->second > Adjusted.second) {
+            // Existing record is farther → keep the closer adjusted one
+ MineDists.erase(It);
+ MineDists.insert(Adjusted);
}
- } else {
- // Just add it!
- for (auto D : Dists)
- NextUseMap[Key].insert({D.first, D.second + Weight});
}
}
}
@@ -206,15 +200,13 @@ class NextUseResult {
LLVM_ATTRIBUTE_NOINLINE void
printSortedRecords(VRegDistances::SortedRecords Records, unsigned VReg,
raw_ostream &O = dbgs()) const {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VReg);
for (auto X : Records) {
- SmallVector<unsigned> Idxs;
- bool HasSubReg = TRI->getCoveringSubRegIndexes(*MRI, RC, X.first, Idxs);
O << "Vreg: ";
- if (HasSubReg)
- for (auto i : Idxs)
- O << printReg(VReg, TRI, i, MRI) << "[ " << X.second << "]\n";
- else
+ LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(VReg);
+ if (X.first != FullMask) {
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(X.first, TRI);
+ O << printReg(VReg, TRI, SubRegIdx, MRI) << "[ " << X.second << "]\n";
+ } else
O << printReg(VReg) << "[ " << X.second << "]\n";
}
}
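
The rewritten merge() offsets every incoming distance by the edge weight and, when a record with the same lane mask already exists, keeps the smaller (closer) next-use distance, matching the replacement condition in the hunk above. A compact standalone model keyed by (vreg, lane mask):

```cpp
// Toy model of NextUseResult's distance merge; Key/Distances are simplified
// stand-ins for the pass's nested containers.
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

using Key = std::pair<unsigned, uint64_t>; // (virtual register, lane mask)
using Distances = std::map<Key, unsigned>; // distance to the next use

void merge(Distances &Mine, const Distances &Other, unsigned Weight) {
  for (const auto &[K, Dist] : Other) {
    unsigned Adjusted = Dist + Weight;       // account for the edge length
    auto [It, Inserted] = Mine.try_emplace(K, Adjusted);
    if (!Inserted && Adjusted < It->second)
      It->second = Adjusted;                 // the closer next use wins
  }
}

int main() {
  Distances Mine = {{{1, 0x3}, 20}};
  merge(Mine, {{{1, 0x3}, 2}, {{2, 0x1}, 7}}, /*Weight=*/10);
  for (const auto &[K, D] : Mine)
    std::cout << "v" << K.first << " mask=0x" << std::hex << K.second
              << std::dec << " dist=" << D << "\n";
  // v1 mask=0x3 dist=12 (2+10 is closer than 20), v2 mask=0x1 dist=17
}
```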
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index b5b2d14c03b00..bf6d466a2f788 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -232,7 +232,10 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
Register NewVReg = MRI->createVirtualRegister(RC);
Op.setReg(NewVReg);
Op.setSubReg(AMDGPU::NoRegister);
- VregNames[Res].push_back({NewVReg, TRI->getSubRegIndexLaneMask(SubRegIdx),
+ VregNames[Res].push_back({NewVReg,
+ SubRegIdx == AMDGPU::NoRegister
+ ? MRI->getMaxLaneMaskForVReg(Res)
+ : TRI->getSubRegIndexLaneMask(SubRegIdx),
AMDGPU::NoRegister, &PHI});
printVRegDefStack(VregNames[Res]);
DefSeen.insert(NewVReg);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 6ef083a164458..e581a883cb05f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -40,8 +40,8 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
DenseMap<VRegMaskPair, unsigned> Virt2StackSlotMap;
DenseMap<VRegMaskPair, MachineInstr *> SpillPoints;
DenseSet<unsigned> ProcessedBlocks;
-
- LLVM_ATTRIBUTE_NOINLINE void dumpRegSet(SetVector<VRegMaskPair> VMPs);
+ using RegisterSet = VRegMaskPairSet;
+ LLVM_ATTRIBUTE_NOINLINE void dumpRegSet(RegisterSet VMPs);
unsigned createSpillSlot(const TargetRegisterClass *RC) {
unsigned Size = TRI->getSpillSize(*RC);
@@ -72,9 +72,6 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
Timer *T2;
Timer *T3;
Timer *T4;
-
- using RegisterSet = SetVector<VRegMaskPair>;
-
struct SpillInfo {
//MachineBasicBlock *Parent;
RegisterSet ActiveSet;
@@ -170,9 +167,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
return M[LHS] < M[RHS];
};
- SmallVector<VRegMaskPair> Tmp(VRegs.takeVector());
- sort(Tmp, SortByDist);
- VRegs.insert(Tmp.begin(), Tmp.end());
+ VRegs.sort(SortByDist);
LLVM_DEBUG(dbgs() << "\nActive set sorted at ";
if (BlockEnd) dbgs() << "end of MBB_" << MBB.getNumber() << "."
<< MBB.getName() << "\n";
@@ -187,8 +182,6 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
unsigned fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
unsigned Capacity = 0);
- bool isCoveredByRegSet(VRegMaskPair VMP, const RegisterSet Active);
-
public:
AMDGPUSSASpiller(LiveIntervals &LIS, MachineLoopInfo &LI,
MachineDominatorTree &MDT, AMDGPUNextUseAnalysis::Result &NU)
@@ -212,7 +205,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
};
LLVM_ATTRIBUTE_NOINLINE void
-AMDGPUSSASpiller::dumpRegSet(SetVector<VRegMaskPair> VMPs) {
+AMDGPUSSASpiller::dumpRegSet(RegisterSet VMPs) {
dbgs() << "\n";
for (auto P : VMPs) {
printVRegMaskPair(P);
@@ -295,45 +288,22 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
VRegMaskPair VMP(U, TRI, MRI);
       // We don't need to make room for the PHI uses as their values are
- // already present in the corresponding predecessor Active set! Just
- // make sure they really are.
- if (I->isPHI()) {
- auto OpNo = U.getOperandNo();
- auto B = I->getOperand(++OpNo);
- assert(B.isMBB());
- MachineBasicBlock *ValueSrc = B.getMBB();
-
- if (ProcessedBlocks.contains(ValueSrc->getNumber())) {
- auto Info = getBlockInfo(*ValueSrc);
- auto SrcActive = Info.ActiveSet;
- auto SrcSpill = Info.SpillSet;
- dumpRegSet(SrcActive);
- dumpRegSet(SrcSpill);
- assert((isCoveredByRegSet(VMP, SrcActive) ||
- isCoveredByRegSet(VMP, SrcSpill)) &&
- "PHI node input value is neither live out predecessor no "
- "spilled!");
- if (isCoveredByRegSet(VMP, SrcSpill)) {
- // reload it at the end of the source block
- Register NewVreg = reloadAtEnd(*ValueSrc, VMP);
- VRegMaskPair NewVMP(NewVreg, VMP.getLaneMask());
- rewriteUses(VMP.getVReg(), NewVreg);
- Active.insert(NewVMP);
- }
- }
+ // already present in the corresponding predecessor Active set!
+ if (I->isPHI())
continue;
- }
- if (!isCoveredByRegSet(VMP, Active)) {
- // Not in reg, hence, should have been spilled before
- // FIXME: This is ODD as the Spilled set is a union among all
- // predecessors and should already contain all spilled before!
- // SPECIAL CASE: undef
- if (!U.isUndef()) {
- Reloads.insert(VMP);
+ LaneCoverageResult CR = Active.getCoverage(VMP);
+ if (!CR.isFullyCovered()) {
+ VRegMaskPair SpilledVMP(VMP.getVReg(), CR.getNotCovered());
+ assert(Spilled.getCoverage(SpilledVMP).isFullyCovered() &&
+             "Instruction register operand is neither live nor "
+ "spilled!");
+
+ if (!U.isUndef()) {
+ Reloads.insert(SpilledVMP);
+ }
}
}
- }
if (I->isPHI()) {
// We don't need to make room for the PHI-defined values as they will be
@@ -375,6 +345,8 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
unsigned NSpills = 0;
limit(MBB, Active, Spilled, I, NumAvailableRegs);
if (!I->isRegSequence()) {
+ // We don't need to make room for the REG_SEQUENCE defs as it is just
+ // combining the registers that are already in Active
NSpills = limit(MBB, Active, Spilled, std::next(I),
NumAvailableRegs - getRegSetSizeInRegs(Defs));
}
@@ -429,8 +401,31 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
MachineBasicBlock *ValueSrc = B.getMBB();
if (ValueSrc->getNumber() == MBB.getNumber()) {
VRegMaskPair VMP(U, TRI, MRI);
- if (!isCoveredByRegSet(VMP, Active)) {
- Register NewVReg = reloadAtEnd(MBB, VMP);
+ LaneCoverageResult CR = Active.getCoverage(VMP);
+ if (!CR.isFullyCovered()) {
+ VRegMaskPair SpilledVMP(VMP.getVReg(), CR.getNotCovered());
+ assert(Spilled.getCoverage(SpilledVMP).isFullyCovered() &&
+               "Instruction register operand is neither live nor "
+ "spilled!");
+ Register NewVReg = reloadAtEnd(MBB, SpilledVMP);
+ if (SpilledVMP != VMP) {
+          // Insert a REG_SEQUENCE combining the reloaded lanes
+          // (NewVReg, CR.getNotCovered()) with the still-active lanes
+          // (VMP.VReg, CR.getCovered()).
+ unsigned SpilledSubReg =
+ getSubRegIndexForLaneMask(SpilledVMP.getLaneMask(), TRI);
+ unsigned ActiveSubReg =
+ getSubRegIndexForLaneMask(CR.getCovered(), TRI);
+ auto *RC = VMP.getRegClass(MRI, TRI);
+ Register FullVReg = MRI->createVirtualRegister(RC);
+ BuildMI(MBB, MBB.getFirstInstrTerminator(),
+ MBB.getFirstInstrTerminator()->getDebugLoc(),
+ TII->get(AMDGPU::REG_SEQUENCE), FullVReg)
+ .addReg(NewVReg, 0, SpilledSubReg)
+ .addImm(SpilledSubReg)
+ .addReg(VMP.getVReg(), 0, ActiveSubReg)
+ .addImm(ActiveSubReg);
+ NewVReg = FullVReg;
+ }
rewriteUses(VMP.getVReg(), NewVReg);
}
}
@@ -658,22 +653,22 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
Tmp.remove_if([&](VRegMaskPair P) { return !takeReg(P.getVReg()); });
LLVM_DEBUG(dbgs() << "\nBlock " << B->getName()
<< " is part of the loop. Used in block: ";
- dumpRegSet(Tmp));
+ Tmp.dump());
UsedInLoop.set_union(Tmp);
}
LLVM_DEBUG(dbgs() << "Total used in loop: "; dumpRegSet(UsedInLoop));
// Take - LiveIns used in Loop. Cand - LiveThrough
- RegisterSet Take = set_intersection(LiveIn, UsedInLoop);
- RegisterSet Cand = set_difference(LiveIn, UsedInLoop);
+ RegisterSet Take = LiveIn.set_intersection(UsedInLoop);
+ RegisterSet Cand = LiveIn.set_difference(UsedInLoop);
// We don't want to reload those not used in the loop which have been already
// spilled.
Cand.set_subtract(Spilled);
LLVM_DEBUG(dbgs() << "\nBlock " << MBB.getName() << "sets\n";
- dbgs() << "Take : "; dumpRegSet(Take); dbgs() << "Cand : ";
- dumpRegSet(Cand));
+ dbgs() << "Take : "; Take.dump(); dbgs() << "Cand : ";
+ Cand.dump());
unsigned TakeSize = fillActiveSet(MBB, Take);
if (TakeSize < NumAvailableRegs) {
@@ -689,7 +684,7 @@ void AMDGPUSSASpiller::initActiveSetLoopHeader(MachineBasicBlock &MBB) {
assert(FullSize <= NumAvailableRegs);
}
LLVM_DEBUG(dbgs() << "\nFinal Loop header Active :";
- dumpRegSet(getBlockInfo(MBB).ActiveSet));
+ getBlockInfo(MBB).ActiveSet.dump());
}
Register AMDGPUSSASpiller::reloadAtEnd(MachineBasicBlock &MBB, VRegMaskPair VMP) {
@@ -817,6 +812,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
for (auto S : Sorted) {
unsigned Size = S.getSizeInRegs(TRI);
CurRP -= Size;
+ // TODO: Coverage!
if (!Spilled.contains(S))
ToSpill.insert(S);
ActiveMask &= (~S.getLaneMask());
@@ -833,6 +829,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
} else {
CurRP -= RegSize;
+ // TODO: Coverage!
if (!Spilled.contains(P))
ToSpill.insert(P);
}
@@ -869,7 +866,7 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned AMDGPUSSASpiller::getRegSetSizeInRegs(const RegisterSet VRegs) {
unsigned Size = 0;
- for (auto &VMP : VRegs) {
+ for (auto VMP : VRegs) {
printVRegMaskPair(VMP);
dbgs() << "\n";
Size += VMP.getSizeInRegs(TRI);
@@ -893,18 +890,6 @@ unsigned AMDGPUSSASpiller::fillActiveSet(MachineBasicBlock &MBB, RegisterSet S,
return Size;
}
-bool AMDGPUSSASpiller::isCoveredByRegSet(VRegMaskPair VMP,
- const RegisterSet Active) {
- // printVRegMaskPair(VMP);
- // dumpRegSet(Active);
- for (auto P : Active) {
- if (P.getVReg() == VMP.getVReg()) {
- return (P.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask();
- }
- }
- return false;
-}
-
bool AMDGPUSSASpiller::run(MachineFunction &MF) {
ST = &MF.getSubtarget<GCNSubtarget>();
MRI = &MF.getRegInfo();
diff --git a/llvm/lib/Target/AMDGPU/VRegMaskPair.h b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
index 561c5d0824ce0..de4e8b818e28d 100644
--- a/llvm/lib/Target/AMDGPU/VRegMaskPair.h
+++ b/llvm/lib/Target/AMDGPU/VRegMaskPair.h
@@ -1,12 +1,23 @@
-//===------- VRegMaskPair.h ----------------------------------------*- C++-
-//*-===//
+//===------- VRegMaskPair.h ----------------------------------------*-
+//C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-
+///
+/// \file
+/// \brief Defines VRegMaskPair and VRegMaskPairSet for managing sets of
+/// virtual registers and their lane masks.
+///
+/// Set operations (union, intersection, subtraction) are implemented based on
+/// *subregister coverage logic* rather than exact equality. This means:
+/// - Two VRegMaskPairs are considered overlapping if their LaneMasks overlap.
+/// - Intersection and subtraction operate on *overlapping masks*, not exact
+/// matches.
+///
+//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_VREGMASKPAIR_H
#define LLVM_LIB_TARGET_VREGMASKPAIR_H
@@ -18,29 +29,31 @@
#include "llvm/Support/Compiler.h"
#include <cassert>
+class VRegMaskPairSet;
+
class VRegMaskPair {
-
- Register VReg;
- LaneBitmask LaneMask;
+ friend class VRegMaskPairSet;
- public:
- VRegMaskPair(Register VReg, LaneBitmask LaneMask)
- : VReg(VReg), LaneMask(LaneMask) {}
+ Register VReg;
+ LaneBitmask LaneMask;
+
+public:
+ VRegMaskPair(Register VReg, LaneBitmask LaneMask)
+ : VReg(VReg), LaneMask(LaneMask) {}
- VRegMaskPair()
- : VReg(AMDGPU::NoRegister), LaneMask(LaneBitmask::getNone()) {}
- VRegMaskPair(const VRegMaskPair &Other) = default;
- VRegMaskPair(VRegMaskPair &&Other) = default;
- VRegMaskPair &operator=(const VRegMaskPair &Other) = default;
- VRegMaskPair &operator=(VRegMaskPair &&Other) = default;
+ VRegMaskPair() : VReg(AMDGPU::NoRegister), LaneMask(LaneBitmask::getNone()) {}
+ VRegMaskPair(const VRegMaskPair &Other) = default;
+ VRegMaskPair(VRegMaskPair &&Other) = default;
+ VRegMaskPair &operator=(const VRegMaskPair &Other) = default;
+ VRegMaskPair &operator=(VRegMaskPair &&Other) = default;
- VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI,
- const MachineRegisterInfo *MRI) {
- assert(MO.isReg() && "Not a register operand!");
- assert(MO.getReg().isVirtual() && "Not a virtual register!");
- VReg = MO.getReg();
- LaneMask = MO.getSubReg() ? TRI->getSubRegIndexLaneMask(MO.getSubReg())
- : MRI->getMaxLaneMaskForVReg(VReg);
+ VRegMaskPair(const MachineOperand MO, const SIRegisterInfo *TRI,
+ const MachineRegisterInfo *MRI) {
+ assert(MO.isReg() && "Not a register operand!");
+ assert(MO.getReg().isVirtual() && "Not a virtual register!");
+ VReg = MO.getReg();
+ LaneMask = MO.getSubReg() ? TRI->getSubRegIndexLaneMask(MO.getSubReg())
+ : MRI->getMaxLaneMaskForVReg(VReg);
}
const Register getVReg() const { return VReg; }
@@ -73,14 +86,299 @@ class VRegMaskPair {
return VReg == other.VReg && LaneMask == other.LaneMask;
}
};
-
+
+ class LaneCoverageResult {
+ friend class VRegMaskPairSet;
+ LaneBitmask Data;
+ LaneBitmask Covered;
+ LaneBitmask NotCovered;
+
+ public:
+ LaneCoverageResult() = default;
+ LaneCoverageResult(const LaneBitmask Mask)
+ : Data(Mask), NotCovered(Mask){};
+ bool isFullyCovered() { return Data == Covered; }
+ bool isFullyUncovered() { return Data == NotCovered; }
+ LaneBitmask getCovered() { return Covered; }
+ LaneBitmask getNotCovered() { return NotCovered; }
+ };
+
+ class VRegMaskPairSet {
+
+ using MaskSet = std::set<LaneBitmask>;
+ using SetStorageT = DenseMap<Register, MaskSet>;
+ using LinearStorageT = std::vector<VRegMaskPair>;
+
+ SetStorageT SetStorage;
+ LinearStorageT LinearStorage;
+
+ public:
+
+ VRegMaskPairSet() = default;
+
+ template <typename ContainerT,
+ typename = std::enable_if_t<std::is_same<
+ typename ContainerT::value_type, VRegMaskPair>::value>>
+ VRegMaskPairSet(const ContainerT &Vec) {
+ for (const auto &VMP : Vec)
+ insert(VMP);
+ }
+
+ template <typename ContainerT,
+ typename = std::enable_if_t<std::is_same<
+ typename ContainerT::value_type, VRegMaskPair>::value>>
+ VRegMaskPairSet(ContainerT &&Vec) {
+ for (auto &&VMP : Vec)
+ insert(std::move(VMP));
+ }
+
+ bool insert(const VRegMaskPair &VMP) {
+ auto &MaskSet = SetStorage[VMP.VReg];
+ auto Inserted = MaskSet.insert(VMP.LaneMask);
+ if (!Inserted.second)
+ return false;
+ LinearStorage.push_back(VMP);
+ return true;
+ }
+
+ template <typename InputIt> void insert(InputIt First, InputIt Last) {
+ for (auto It = First; It != Last; ++It)
+ insert(*It);
+ }
+
+ void remove(const VRegMaskPair &VMP) {
+ auto MapIt = SetStorage.find(VMP.VReg);
+ if (MapIt == SetStorage.end())
+ return;
+
+ size_t Erased = MapIt->second.erase(VMP.LaneMask);
+ if (!Erased)
+ return;
+
+ if (MapIt->second.empty())
+ SetStorage.erase(MapIt);
+
+ auto VecIt = std::find(LinearStorage.begin(), LinearStorage.end(), VMP);
+ if (VecIt != LinearStorage.end()) {
+ LinearStorage.erase(VecIt);
+ } else {
+ llvm_unreachable("Inconsistent LinearStorage: VMP missing on remove");
+ }
+ }
+
+ template <typename Predicate> void remove_if(Predicate Pred) {
+ for (auto It = LinearStorage.begin(); It != LinearStorage.end();) {
+ const VRegMaskPair VMP = *It;
+ if (Pred(VMP)) {
+ It = LinearStorage.erase(It);
+ SetStorage[VMP.VReg].erase(VMP.LaneMask);
+ if (SetStorage[VMP.VReg].empty())
+ SetStorage.erase(VMP.VReg);
+ } else {
+ ++It;
+ }
+ }
+ }
+
+ bool count(const VRegMaskPair &VMP) const {
+ auto It = SetStorage.find(VMP.VReg);
+ if (It == SetStorage.end())
+ return false;
+
+ return It->second.count(VMP.LaneMask) > 0;
+ }
+
+ bool contains(const VRegMaskPair &VMP) const {
+ auto It = SetStorage.find(VMP.VReg);
+ return It != SetStorage.end() && It->second.contains(VMP.LaneMask);
+ }
+
+ void clear() {
+ SetStorage.clear();
+ LinearStorage.clear();
+ }
+
+ size_t size() const { return LinearStorage.size(); }
+ bool empty() const { return LinearStorage.empty(); }
+
+ void
+ sort(llvm::function_ref<bool(const VRegMaskPair &, const VRegMaskPair &)>
+ Cmp) {
+ std::sort(LinearStorage.begin(), LinearStorage.end(), Cmp);
+ }
+
+ VRegMaskPair pop_back_val() {
+ assert(!LinearStorage.empty() && "Pop from empty set");
+ VRegMaskPair VMP = LinearStorage.back();
+ LinearStorage.pop_back();
+
+ auto It = SetStorage.find(VMP.VReg);
+ assert(It != SetStorage.end() && "Inconsistent SetStorage");
+ It->second.erase(VMP.LaneMask);
+ if (It->second.empty())
+ SetStorage.erase(It);
+
+ return VMP;
+ }
+
+ LaneCoverageResult getCoverage(const VRegMaskPair &VMP) const {
+ LaneCoverageResult Result(VMP.LaneMask);
+ auto It = SetStorage.find(VMP.VReg);
+ if (It != SetStorage.end()) {
+ MaskSet Masks = It->second;
+ for (auto Mask : Masks) {
+ Result.Covered |= (Mask & VMP.LaneMask);
+ }
+ Result.NotCovered = (VMP.LaneMask & ~Result.Covered);
+ }
+ return Result;
+ }
+
+ bool operator==(const VRegMaskPairSet &Other) const {
+ if (SetStorage.size() != Other.SetStorage.size())
+ return false;
+
+ for (const auto &Entry : SetStorage) {
+ auto It = Other.SetStorage.find(Entry.first);
+ if (It == Other.SetStorage.end())
+ return false;
+
+ if (Entry.second != It->second)
+ return false;
+ }
+
+ return true;
+ }
+
+ template <typename ContainerT>
+ VRegMaskPairSet &operator=(const ContainerT &Vec) {
+ static_assert(
+ std::is_same<typename ContainerT::value_type, VRegMaskPair>::value,
+ "Container must hold VRegMaskPair elements");
+
+ clear();
+ for (const auto &VMP : Vec)
+ insert(VMP);
+ return *this;
+ }
+
+ // Set operations based on subregister coverage logic
+
+  /// Inserts every element of Other that is not already present in *this
+  /// (exact (VReg, LaneMask) match).
+ void set_union(const VRegMaskPairSet &Other) {
+ for (const auto &VMP : Other)
+ insert(VMP);
+ }
+
+  /// Keeps only the lanes in *this that are covered by Other; partially
+  /// covered elements are trimmed to their covered lanes.
+ void set_intersect(const VRegMaskPairSet &Other) {
+ std::vector<VRegMaskPair> ToInsert;
+ remove_if([&](const VRegMaskPair &VMP) {
+ LaneCoverageResult Cov = Other.getCoverage(VMP);
+ if (Cov.isFullyUncovered())
+ return true;
+
+ if (!Cov.isFullyCovered()) {
+ ToInsert.push_back({VMP.VReg, Cov.getCovered()});
+ return true; // remove current, will reinsert trimmed version
+ }
+
+ return false; // keep as-is
+ });
+
+ insert(ToInsert.begin(), ToInsert.end());
+ }
+
+  /// Removes from *this the lanes covered by Other; fully covered elements
+  /// are erased, partially covered ones are trimmed to their uncovered lanes.
+ void set_subtract(const VRegMaskPairSet &Other) {
+ std::vector<VRegMaskPair> ToInsert;
+ remove_if([&](const VRegMaskPair &VMP) {
+ LaneCoverageResult Cov = Other.getCoverage(VMP);
+ if (Cov.isFullyCovered())
+ return true;
+
+ if (!Cov.isFullyUncovered()) {
+ ToInsert.push_back({VMP.VReg, Cov.getNotCovered()});
+ return true; // remove and reinsert uncovered part
+ }
+
+ return false;
+ });
+
+ insert(ToInsert.begin(), ToInsert.end());
+ }
+
+  /// Returns a copy of this set with all elements of Other inserted.
+ VRegMaskPairSet set_join(const VRegMaskPairSet &Other) const {
+ VRegMaskPairSet Result = *this;
+ Result.set_union(Other);
+ return Result;
+ }
+
+  /// Returns, for each element of *this, the lanes that are covered by
+  /// Other; fully uncovered elements are dropped.
+ VRegMaskPairSet set_intersection(const VRegMaskPairSet &Other) const {
+ VRegMaskPairSet Result;
+ for (const auto &VMP : *this) {
+ LaneCoverageResult Cov = Other.getCoverage(VMP);
+ if (!Cov.isFullyUncovered()) {
+ Result.insert({VMP.VReg, Cov.getCovered()});
+ }
+ }
+ return Result;
+ }
+
+  /// Returns, for each element of *this, the lanes not covered by Other;
+  /// fully covered elements are dropped.
+ VRegMaskPairSet set_difference(const VRegMaskPairSet &Other) const {
+ VRegMaskPairSet Result;
+ for (const auto &VMP : *this) {
+ LaneCoverageResult Cov = Other.getCoverage(VMP);
+ if (!Cov.isFullyCovered()) {
+ Result.insert({VMP.VReg, Cov.getNotCovered()});
+ }
+ }
+ return Result;
+ }
+
+ // Debug
+ void dump() const {
+ dbgs() << "=== VRegMaskPairSet Dump ===\n";
+
+ dbgs() << "SetStorage:\n";
+ for (const auto &Entry : SetStorage) {
+ dbgs() << " VReg: " << printReg(Entry.first) << " => { ";
+ for (const auto &Mask : Entry.second) {
+ dbgs() << PrintLaneMask(Mask) << " ";
+ }
+ dbgs() << "}\n";
+ }
+
+ dbgs() << "LinearStorage (insertion order):\n";
+ for (const auto &VMP : LinearStorage) {
+ dbgs() << " (" << printReg(VMP.getVReg()) << ", "
+ << PrintLaneMask(VMP.getLaneMask()) << ")\n";
+ }
+
+ dbgs() << "=============================\n";
+ }
+
+ // Iterators
+ using iterator = LinearStorageT::const_iterator;
+ iterator begin() const { return LinearStorage.begin(); }
+ iterator end() const { return LinearStorage.end(); }
+ };
+
namespace llvm {
template <> struct DenseMapInfo<VRegMaskPair> {
static inline VRegMaskPair getEmptyKey() {
return {Register(DenseMapInfo<unsigned>::getEmptyKey()),
LaneBitmask(0xFFFFFFFFFFFFFFFFULL)};
}
-
+
static inline VRegMaskPair getTombstoneKey() {
return {Register(DenseMapInfo<unsigned>::getTombstoneKey()),
LaneBitmask(0xFFFFFFFFFFFFFFFEULL)};
@@ -100,5 +398,6 @@ class VRegMaskPair {
RHS.getLaneMask().getAsInteger());
}
};
- } // namespace llvm
+
+ } // namespace llvm
#endif // LLVM_LIB_TARGET_VREGMASKPAIR_H
\ No newline at end of file
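To make the coverage semantics documented in VRegMaskPair.h concrete: getCoverage splits a queried (VReg, LaneMask) into the lanes already present in the set (Covered) and the remainder (NotCovered); the spiller then reloads only the NotCovered lanes and stitches them to the live part with a REG_SEQUENCE, as in processBlock above. A minimal standalone sketch, with plain integers standing in for Register and LaneBitmask (assumed stand-ins, not the in-tree classes):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

using Reg = unsigned;
using Mask = uint64_t;

struct Coverage {
  Mask Covered = 0, NotCovered = 0;
};

// Covered = union of the set's masks intersected with the query mask;
// NotCovered = the remaining lanes of the query.
Coverage getCoverage(const std::map<Reg, std::set<Mask>> &Set, Reg R, Mask M) {
  Coverage C;
  C.NotCovered = M;
  auto It = Set.find(R);
  if (It != Set.end()) {
    for (Mask S : It->second)
      C.Covered |= (S & M);
    C.NotCovered = M & ~C.Covered;
  }
  return C;
}

int main() {
  std::map<Reg, std::set<Mask>> Active{{1, {0x1}}};     // only sub0 of %1 is live
  Coverage C = getCoverage(Active, /*R=*/1, /*M=*/0x3); // query sub0+sub1
  std::cout << "covered 0x" << std::hex << C.Covered
            << " notCovered 0x" << C.NotCovered << '\n'; // 0x1 / 0x2
  // Only the NotCovered lanes (0x2 here) need a reload; the covered lanes
  // come from the still-active register.
}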
diff --git a/llvm/unittests/CodeGen/CMakeLists.txt b/llvm/unittests/CodeGen/CMakeLists.txt
index 22dbdaa4fa82e..9d45945704409 100644
--- a/llvm/unittests/CodeGen/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/CMakeLists.txt
@@ -48,10 +48,16 @@ add_llvm_unittest(CodeGenTests
TargetOptionsTest.cpp
TestAsmPrinter.cpp
MLRegAllocDevelopmentFeatures.cpp
- X86MCInstLowerTest.cpp
+ VRegMaskPairTest.cpp
)
add_subdirectory(GlobalISel)
-add_subdirectory(CGPluginTest)
+
+target_include_directories(CodeGenTests
+ PRIVATE
+ ${LLVM_MAIN_SRC_DIR}/lib/Target/AMDGPU
+ ${LLVM_BINARY_DIR}/lib/Target/AMDGPU
+)
+
target_link_libraries(CodeGenTests PRIVATE LLVMTestingSupport)
diff --git a/llvm/unittests/CodeGen/VRegMaskPairTest.cpp b/llvm/unittests/CodeGen/VRegMaskPairTest.cpp
new file mode 100644
index 0000000000000..8ac4a3e9b840d
--- /dev/null
+++ b/llvm/unittests/CodeGen/VRegMaskPairTest.cpp
@@ -0,0 +1,178 @@
+//===- VRegMaskPairTest.cpp - Unit tests for VRegMaskPairSet -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/Register.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+#include "AMDGPUSSARAUtils.h"
+#include "SIRegisterInfo.h"
+#include "VRegMaskPair.h"
+
+using namespace llvm;
+
+namespace {
+
+class VRegMaskPairTest : public ::testing::Test {
+protected:
+ Register R1 = Register::index2VirtReg(1);
+ Register R2 = Register::index2VirtReg(2);
+ Register R3 = Register::index2VirtReg(3);
+ Register R4 = Register::index2VirtReg(4);
+
+ LaneBitmask M0 = LaneBitmask::getLane(0); // sub0
+ LaneBitmask M1 = LaneBitmask::getLane(1); // sub1
+ LaneBitmask M2 = LaneBitmask::getLane(2); // sub2
+ LaneBitmask M3 = LaneBitmask::getLane(3); // sub3
+ LaneBitmask M01 = M0 | M1;
+ LaneBitmask FULL = LaneBitmask::getAll();
+};
+
+TEST_F(VRegMaskPairTest, BasicInsertAndCoverage) {
+ VRegMaskPairSet Set;
+ EXPECT_TRUE(Set.insert({R1, M0}));
+ EXPECT_TRUE(Set.insert({R1, M1}));
+ EXPECT_FALSE(Set.insert({R1, M1})); // duplicate
+
+ LaneCoverageResult Cov = Set.getCoverage({R1, M01});
+ EXPECT_TRUE(Cov.isFullyCovered());
+ EXPECT_EQ(Cov.getCovered(), M01);
+ EXPECT_EQ(Cov.getNotCovered(), LaneBitmask::getNone());
+}
+
+TEST_F(VRegMaskPairTest, ExactContains) {
+ VRegMaskPairSet Set;
+ Set.insert({R2, M2});
+ EXPECT_TRUE(Set.contains({R2, M2}));
+ EXPECT_FALSE(Set.contains({R2, M3}));
+}
+
+TEST_F(VRegMaskPairTest, UnionAndJoinPreserveEntries) {
+ VRegMaskPairSet A, B;
+ A.insert({R1, M0});
+ A.insert({R2, M1});
+
+ B.insert({R1, M1});
+ B.insert({R3, M0});
+
+ VRegMaskPairSet U = A.set_join(B);
+ EXPECT_TRUE(U.contains({R1, M0}));
+ EXPECT_TRUE(U.contains({R1, M1}));
+ EXPECT_TRUE(U.contains({R2, M1}));
+ EXPECT_TRUE(U.contains({R3, M0}));
+}
+
+TEST_F(VRegMaskPairTest, IntersectionKeepsOnlyCoveredParts) {
+ VRegMaskPairSet A, B;
+ A.insert({R1, M0 | M1});
+ A.insert({R2, M0});
+ A.insert({R3, FULL});
+ A.insert({R4, M1});
+
+ B.insert({R1, M1});
+ B.insert({R2, M1});
+ B.insert({R4, FULL});
+
+ VRegMaskPairSet I = A.set_intersection(B);
+ EXPECT_TRUE(I.contains({R1, M1}));
+ EXPECT_FALSE(I.contains({R2, M0}));
+ EXPECT_FALSE(I.contains({R3, FULL}));
+ EXPECT_TRUE(I.contains({R4, M1}));
+}
+
+TEST_F(VRegMaskPairTest, SubtractionRemovesCoveredParts) {
+ VRegMaskPairSet A, B;
+ A.insert({R1, M0 | M1});
+ A.insert({R2, M1});
+ A.insert({R3, M2});
+
+ B.insert({R1, M1});
+ B.insert({R3, M2});
+
+ VRegMaskPairSet D = A.set_difference(B);
+ EXPECT_TRUE(D.contains({R1, M0}));
+ EXPECT_FALSE(D.contains({R1, M1}));
+ EXPECT_TRUE(D.contains({R2, M1}));
+ EXPECT_FALSE(D.contains({R3, M2}));
+}
+
+TEST_F(VRegMaskPairTest, SetOperations) {
+ VRegMaskPairSet A, B;
+ A.insert({R1, M0});
+ A.insert({R2, M0});
+ A.insert({R3, FULL});
+ A.insert({R4, M1});
+
+ B.insert({R1, M1});
+ B.insert({R2, M1});
+ B.insert({R4, FULL});
+
+ VRegMaskPairSet I = A.set_intersection(B);
+ EXPECT_FALSE(I.contains({R1, M1}));
+ EXPECT_FALSE(I.contains({R2, M0}));
+ EXPECT_FALSE(I.contains({R3, FULL}));
+ EXPECT_TRUE(I.contains({R4, M1}));
+
+ VRegMaskPairSet D = A.set_difference(B);
+ EXPECT_TRUE(D.contains({R1, M0}));
+ EXPECT_TRUE(D.contains({R2, M0}));
+ EXPECT_TRUE(D.contains({R3, FULL}));
+ EXPECT_FALSE(D.contains({R4, M1}));
+
+ VRegMaskPairSet U = A.set_join(B);
+ EXPECT_TRUE(U.contains({R1, M0}));
+ EXPECT_TRUE(U.contains({R1, M1}));
+ EXPECT_TRUE(U.contains({R2, M0}));
+ EXPECT_TRUE(U.contains({R2, M1}));
+ EXPECT_TRUE(U.contains({R3, FULL}));
+ EXPECT_TRUE(U.contains({R4, M1}));
+}
+
+TEST_F(VRegMaskPairTest, InPlaceSetOperations) {
+ VRegMaskPairSet A, B;
+ A.insert({R1, M0});
+ A.insert({R2, M1});
+
+ B.insert({R1, M1});
+ B.insert({R3, M0});
+
+ VRegMaskPairSet AU = A;
+ AU.set_union(B);
+ EXPECT_TRUE(AU.contains({R1, M0}));
+ EXPECT_TRUE(AU.contains({R1, M1}));
+ EXPECT_TRUE(AU.contains({R2, M1}));
+ EXPECT_TRUE(AU.contains({R3, M0}));
+
+ VRegMaskPairSet AI = A;
+ AI.set_intersect(B);
+ EXPECT_FALSE(AI.contains({R1, M1}));
+ EXPECT_FALSE(AI.contains({R1, M0}));
+ EXPECT_FALSE(AI.contains({R2, M1}));
+
+ VRegMaskPairSet AD = A;
+ AD.set_subtract(B);
+ EXPECT_TRUE(AD.contains({R1, M0}));
+ EXPECT_TRUE(AD.contains({R2, M1}));
+ EXPECT_FALSE(AD.contains({R1, M1}));
+}
+
+TEST_F(VRegMaskPairTest, RemoveAndPop) {
+ VRegMaskPairSet Set;
+ Set.insert({R1, M0});
+ Set.insert({R2, M1});
+ Set.insert({R3, M2});
+
+ Set.remove({R2, M1});
+ EXPECT_FALSE(Set.contains({R2, M1}));
+ EXPECT_EQ(Set.size(), 2u);
+
+ VRegMaskPair Last = Set.pop_back_val();
+ EXPECT_FALSE(Set.contains(Last));
+ EXPECT_EQ(Set.size(), 1u);
+}
+
+} // namespace
>From adee2302fbe1db734e645f77d0c6c6f5f1b7faa9 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Thu, 24 Jul 2025 19:34:24 +0000
Subject: [PATCH 41/46] SSA Spiller: LIS on reload and RegisterClass in spill
fixed
---
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index e581a883cb05f..125cb309df020 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -361,6 +361,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
dbgs() << "\n");
Register NewVReg = reloadBefore(I, R);
rewriteUses(R.getVReg(), NewVReg);
+ LIS.createAndComputeVirtRegInterval(NewVReg);
}
std::advance(I, NSpills);
@@ -427,6 +428,7 @@ void AMDGPUSSASpiller::processBlock(MachineBasicBlock &MBB) {
NewVReg = FullVReg;
}
rewriteUses(VMP.getVReg(), NewVReg);
+ LIS.createAndComputeVirtRegInterval(NewVReg);
}
}
}
@@ -577,6 +579,7 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
for (auto R : ReloadInPred) {
Register NewVReg = reloadAtEnd(*Pred, R);
rewriteUses(R.getVReg(), NewVReg);
+ LIS.createAndComputeVirtRegInterval(NewVReg);
}
}
}
@@ -705,8 +708,6 @@ AMDGPUSSASpiller::reloadBefore(MachineBasicBlock::iterator InsertBefore,
TII->loadRegFromStackSlot(*MBB, InsertBefore, NewVReg, FI, RC, TRI, NewVReg);
MachineInstr *ReloadMI = MRI->getVRegDef(NewVReg);
LIS.InsertMachineInstrInMaps(*ReloadMI);
-
- LIS.createAndComputeVirtRegInterval(NewVReg);
auto &Entry = getBlockInfo(*MBB);
Entry.ActiveSet.insert({NewVReg, MRI->getMaxLaneMaskForVReg(NewVReg)});
return NewVReg;
@@ -715,11 +716,16 @@ AMDGPUSSASpiller::reloadBefore(MachineBasicBlock::iterator InsertBefore,
void AMDGPUSSASpiller::spillBefore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
VRegMaskPair VMP) {
- const TargetRegisterClass *RC = TRI->getRegClassForReg(*MRI, VMP.getVReg());
unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
+ const TargetRegisterClass *RC =
+ SubRegIdx == AMDGPU::NoRegister
+ ? TRI->getRegClassForReg(*MRI, VMP.getVReg())
+ : TRI->getSubRegisterClass(
+ TRI->getRegClassForReg(*MRI, VMP.getVReg()), SubRegIdx);
int FI = assignVirt2StackSlot(VMP);
- TII->storeRegToStackSlot(MBB, InsertBefore, VMP.getVReg(), true, FI, RC, TRI,
- VMP.getVReg(), SubRegIdx);
+ TII->storeRegToStackSlot(MBB, InsertBefore, VMP.getVReg(),
+ SubRegIdx == AMDGPU::NoRegister ? true : false, FI,
+ RC, TRI, VMP.getVReg(), SubRegIdx);
// FIXME: dirty hack! To avoid further changing the TargetInstrInfo interface.
MachineInstr &Spill = *(--InsertBefore);
LIS.InsertMachineInstrInMaps(Spill);
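The register-class change in this patch matters because the spill width is derived from the register class handed to storeRegToStackSlot (and spill slots are sized from a class via TRI->getSpillSize, see createSpillSlot earlier in this file), so passing the full register's class when only a subregister is spilled would use the wrong width. A tiny standalone sketch of that selection with assumed byte sizes (not the actual TRI queries):

#include <iostream>
#include <string>

// Assumed classes and spill sizes; illustrative only.
struct RegClass { std::string Name; unsigned SpillSizeBytes; };

RegClass pickSpillClass(bool HasSubReg) {
  RegClass Full{"vreg_64", 8};
  RegClass Sub{"vgpr_32", 4};
  // Mirrors the decision above: no subregister -> full class,
  // otherwise the subregister's class, so the spill gets the right size.
  return HasSubReg ? Sub : Full;
}

int main() {
  std::cout << pickSpillClass(false).Name << " -> "
            << pickSpillClass(false).SpillSizeBytes << " bytes\n"; // vreg_64 -> 8
  std::cout << pickSpillClass(true).Name << " -> "
            << pickSpillClass(true).SpillSizeBytes << " bytes\n";  // vgpr_32 -> 4
}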
>From fddefec8f0a181ec3f74b2cc71008dda87fff721 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Wed, 30 Jul 2025 16:45:48 +0000
Subject: [PATCH 42/46] SSA Spiller. Debug dumps cleanup. LIT tests added.
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 2 +-
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 46 +-
llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp | 34 +-
.../AMDGPU/SSARA/if_loop_with_subregs.mir | 544 ++++++++++++++++++
.../AMDGPU/SSARA/test_rebuild_ssa_subregs.mir | 375 ++++++++++++
llvm/test/CodeGen/AMDGPU/SSARA/test_spill.mir | 127 ++++
.../AMDGPU/SSARA/test_spill_subregs.mir | 144 +++++
7 files changed, 1238 insertions(+), 34 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/SSARA/test_spill.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/SSARA/test_spill_subregs.mir
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index 69c1e7b2f6ab5..ff432233f40e4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -171,7 +171,7 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
dumpUsedInBlock();
T1->stopTimer();
- TG->print(llvm::errs());
+ LLVM_DEBUG(TG->print(llvm::errs()));
}
void NextUseResult::getFromSortedRecords(
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index bf6d466a2f788..7f280a43d4cb2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -38,6 +38,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
using VRegDefStack = std::vector<CurVRegInfo>;
+#ifndef NDEBUG
void printVRegDefStack(VRegDefStack VregDefs) {
VRegDefStack::reverse_iterator It = VregDefs.rbegin();
dbgs() << "\n####################################\n";
@@ -51,6 +52,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
}
dbgs() << "####################################\n";
}
+#endif
SetVector<VRegMaskPair> CrossBlockVRegs;
DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
@@ -98,31 +100,30 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
"Error: use does not dominated by definition!\n");
SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
- dbgs() << "Use mask : " << PrintLaneMask(UseMask) << "\n";
+ LLVM_DEBUG(dbgs() << "Use mask : " << PrintLaneMask(UseMask)
+                      << "\nLooking for appropriate definition...\n");
LaneBitmask UndefSubRegs = UseMask;
LaneBitmask DefinedLanes = LaneBitmask::getNone();
unsigned SubRegIdx = AMDGPU::NoRegister;
- dbgs() << "Looking for appropriate definiton...\n";
Register CurVReg = AMDGPU::NoRegister;
VRegDefStack VregDefs = VregNames[VReg];
VRegDefStack::reverse_iterator It = VregDefs.rbegin();
for (; It != VregDefs.rend(); ++It) {
CurVRegInfo VRInfo = *It;
- dbgs() << "Def:\n";
CurVReg = VRInfo.CurName;
MachineInstr *DefMI = VRInfo.DefMI;
MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
const TargetRegisterClass *RC =
TRI->getRegClassForOperandReg(*MRI, *DefOp);
- dbgs() << "DefMI: " << *DefMI << "\n";
- dbgs() << "Operand: " << *DefOp << "\n";
LaneBitmask DefMask = VRInfo.PrevMask;
- dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
LaneBitmask LanesDefinedyCurrentDef = (UndefSubRegs & DefMask) & UseMask;
- dbgs() << "Lanes defined by current Def: "
- << PrintLaneMask(LanesDefinedyCurrentDef) << "\n";
DefinedLanes |= LanesDefinedyCurrentDef;
- dbgs() << "Total defined lanes: " << PrintLaneMask(DefinedLanes) << "\n";
+ LLVM_DEBUG(dbgs() << "Def:\nDefMI: " << *DefMI << "\nOperand : " << *DefOp
+ << "\nDef mask : " << PrintLaneMask(DefMask)
+ << "\nLanes defined by current Def: "
+ << PrintLaneMask(LanesDefinedyCurrentDef)
+ << "\nTotal defined lanes: " << PrintLaneMask(DefinedLanes)
+ << "\n");
if (LanesDefinedyCurrentDef == UseMask) {
// All lanes used here are defined by this def.
@@ -148,9 +149,9 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
SmallVector<unsigned> Idxs =
getCoveringSubRegsForLaneMask(LanesDefinedyCurrentDef, RC, TRI);
for (unsigned SubIdx : Idxs) {
- dbgs() << "Matching subreg: " << SubIdx << " : "
+ LLVM_DEBUG(dbgs() << "Matching subreg: " << SubIdx << " : "
<< PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
- << "\n";
+ << "\n");
RegSeqOps.push_back({CurVReg, SubIdx, SubIdx});
}
} else {
@@ -160,13 +161,13 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
}
UndefSubRegs = UseMask & ~DefinedLanes;
- dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n";
+ LLVM_DEBUG(dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n");
if (UndefSubRegs.none())
break;
} else {
// The current definition does not define any of the lanes used
// here. Continue to search for the definition.
- dbgs() << "No lanes defined by this def!\n";
+ LLVM_DEBUG(dbgs() << "No lanes defined by this def!\n");
continue;
}
}
@@ -207,8 +208,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
"Use is not dominated by definition!\n");
if (RewriteOp) {
- dbgs() << "Rewriting use: " << Op << " to "
- << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n";
+ LLVM_DEBUG(dbgs() << "Rewriting use: " << Op << " to "
+ << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n");
Op.setReg(CurVReg);
Op.setSubReg(SubRegIdx);
}
@@ -223,7 +224,6 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
for (auto &PHI : MBB.phis()) {
MachineOperand &Op = PHI.getOperand(0);
Register Res = Op.getReg();
- printVRegDefStack(VregNames[Res]);
unsigned SubRegIdx = Op.getSubReg();
const TargetRegisterClass *RC =
SubRegIdx ? TRI->getSubRegisterClass(
@@ -237,7 +237,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
? MRI->getMaxLaneMaskForVReg(Res)
: TRI->getSubRegIndexLaneMask(SubRegIdx),
AMDGPU::NoRegister, &PHI});
- printVRegDefStack(VregNames[Res]);
+ LLVM_DEBUG(dbgs() << "\nNames stack:\n";printVRegDefStack(VregNames[Res]));
DefSeen.insert(NewVReg);
Renamed.insert(Res);
}
@@ -261,8 +261,8 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
VregNames[VReg].push_back({NewVReg,
getOperandLaneMask(Op, TRI, MRI),
Op.getSubReg(), &I});
- printVRegDefStack(VregNames[VReg]);
-
+ LLVM_DEBUG(dbgs() << "\nNames stack:\n";
+ printVRegDefStack(VregNames[VReg]));
Op.ChangeToRegister(NewVReg, true, false, false, false, false);
Op.setSubReg(AMDGPU::NoRegister);
@@ -273,8 +273,9 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
} else {
VregNames[VReg].push_back(
{VReg, getOperandLaneMask(Op, TRI, MRI), Op.getSubReg(), &I});
- printVRegDefStack(VregNames[VReg]);
-
+ LLVM_DEBUG(dbgs() << "\nNames stack:\n";
+ printVRegDefStack(VregNames[VReg]));
+
DefSeen.insert(VReg);
}
}
@@ -368,6 +369,9 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
MRI = &MF.getRegInfo();
TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
+ if (MRI->isSSA())
+ return false;
+
CrossBlockVRegs.clear();
DefBlocks.clear();
LiveInBlocks.clear();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
index 125cb309df020..9143e111cbdc6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSSASpiller.cpp
@@ -96,6 +96,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
dbgs() << printReg(P.VReg) << "]\n";
}
+ #ifndef NDEBUG
void dump() {
for (auto SI : RegisterMap) {
dbgs() << "\nMBB: " << SI.first;
@@ -110,6 +111,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
dbgs() << "\n";
}
}
+ #endif
void init(MachineFunction &MF, bool IsVGPRs) {
IsVGPRsPass = IsVGPRs;
@@ -204,6 +206,7 @@ class AMDGPUSSASpiller : public PassInfoMixin <AMDGPUSSASpiller> {
bool run(MachineFunction &MF);
};
+#ifndef NDEBUG
LLVM_ATTRIBUTE_NOINLINE void
AMDGPUSSASpiller::dumpRegSet(RegisterSet VMPs) {
dbgs() << "\n";
@@ -225,6 +228,7 @@ AMDGPUSSASpiller::printVRegMaskPair(const VRegMaskPair P) {
dbgs() << printReg(P.getVReg(), TRI, SubRegIndex, MRI) << "] ";
}
}
+#endif
AMDGPUSSASpiller::SpillInfo &
AMDGPUSSASpiller::getBlockInfo(const MachineBasicBlock &MBB) {
@@ -484,9 +488,9 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
}
for (auto Pred : Preds) {
- dumpRegSet(getBlockInfo(*Pred).SpillSet);
+ LLVM_DEBUG(dumpRegSet(getBlockInfo(*Pred).SpillSet));
Entry.SpillSet.set_union(getBlockInfo(*Pred).SpillSet);
- dumpRegSet(Entry.SpillSet);
+ LLVM_DEBUG(dumpRegSet(Entry.SpillSet));
}
  // The line below was added according to the algorithm proposed in Braun & Hack.
// It is commented out because of the following observation:
@@ -529,7 +533,6 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
dumpRegSet(PE.SpillSet));
for (auto S : set_intersection(set_difference(Entry.SpillSet, PE.SpillSet),
PE.ActiveSet)) {
- printVRegMaskPair(S);
ToSpill[Pred].insert(S);
}
}
@@ -541,10 +544,14 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
spillAtEnd(*Pred, S);
PE.SpillSet.insert(S);
PE.ActiveSet.remove(S);
- dumpRegSet(PE.ActiveSet);
+ LLVM_DEBUG(dbgs() << "\nPred [ MBB_" << Pred->getNumber()
+ << "] Active set after spilling:\n";
+ dumpRegSet(PE.ActiveSet));
Entry.SpillSet.insert(S);
Entry.ActiveSet.remove(S);
- dumpRegSet(Entry.ActiveSet);
+ LLVM_DEBUG(dbgs() << "\nBlock [ MBB_" << MBB.getNumber()
+ << "] Active set after spilling:\n";
+ dumpRegSet(Entry.ActiveSet));
}
}
@@ -559,13 +566,18 @@ void AMDGPUSSASpiller::connectToPredecessors(MachineBasicBlock &MBB,
<< Pred->getName() << " ] ActiveSet:\n";
dumpRegSet(PE.ActiveSet));
RegisterSet Tmp = set_difference(Entry.ActiveSet, PE.ActiveSet);
- dumpRegSet(Tmp);
+ LLVM_DEBUG(dbgs() << "\nMBB_" << MBB.getNumber() << "." << MBB.getName()
+ << " Active Set and Pred MBB_" << Pred->getNumber() << "."
+ << Pred->getName() << " ActiveSet DIFFERENCE:\n";
+ dumpRegSet(Tmp));
// Pred LiveOuts which are current block PHI operands don't need to be
// active across both edges.
RegisterSet ReloadInPred = set_difference(Tmp, PHIOps);
- dumpRegSet(ReloadInPred);
+ LLVM_DEBUG(dbgs() << "\nPHI operands removed from set:\n";
+ dumpRegSet(ReloadInPred));
set_intersect(ReloadInPred, PE.SpillSet);
- dumpRegSet(ReloadInPred);
+ LLVM_DEBUG(dbgs() << "Reloads and Spilled INTERSECTION:\n";
+ dumpRegSet(ReloadInPred));
if (!ReloadInPred.empty()) {
// Since we operate on SSA, any register that is live across the edge
@@ -873,8 +885,6 @@ unsigned AMDGPUSSASpiller::limit(MachineBasicBlock &MBB, RegisterSet &Active,
unsigned AMDGPUSSASpiller::getRegSetSizeInRegs(const RegisterSet VRegs) {
unsigned Size = 0;
for (auto VMP : VRegs) {
- printVRegMaskPair(VMP);
- dbgs() << "\n";
Size += VMP.getSizeInRegs(TRI);
}
return Size;
@@ -908,9 +918,9 @@ bool AMDGPUSSASpiller::run(MachineFunction &MF) {
init(MF, true);
processFunction(MF);
- MF.viewCFG();
+ // MF.viewCFG();
T1->stopTimer();
- TG->print(llvm::errs());
+ LLVM_DEBUG(TG->print(llvm::errs()));
return false;
}
} // namespace
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir b/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
new file mode 100644
index 0000000000000..0ac66ea32f19e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
@@ -0,0 +1,544 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-rebuild-ssa,amdgpu-ssa-spiller %s -verify-machineinstrs -o - | FileCheck %s
+
+--- |
+ source_filename = "test0.ll"
+ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
+
+ define amdgpu_kernel void @test0(ptr addrspace(1) %arg) #0 {
+ S:
+ %test0.kernarg.segment = call nonnull align 16 dereferenceable(264) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+ %arg.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4) %test0.kernarg.segment, i64 36, !amdgpu.uniform !0
+ %arg.load = load ptr addrspace(1), ptr addrspace(4) %arg.kernarg.offset, align 4, !invariant.load !0
+ %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %idxX = mul i32 %tmp, 16
+ %idxY = mul i32 %tmp, 19
+ %idxprom = sext i32 %idxX to i64
+ %gepX = getelementptr inbounds i32, ptr addrspace(1) %arg.load, i64 %idxprom
+ %idxprom1 = sext i32 %idxY to i64
+ %gepY = getelementptr inbounds i32, ptr addrspace(1) %arg.load, i64 %idxprom1
+ %X = load i32, ptr addrspace(1) %gepX, align 4
+ %Y = load i32, ptr addrspace(1) %gepY, align 4
+ %gepC = getelementptr inbounds i32, ptr addrspace(1) %arg.load, i32 128, !amdgpu.uniform !0
+ %C = load i32, ptr addrspace(1) %gepC, align 4, !amdgpu.noclobber !0
+ %cmp0 = icmp sge i32 %C, %tmp
+ %0 = call { i1, i64 } @llvm.amdgcn.if.i64(i1 %cmp0)
+ %1 = extractvalue { i1, i64 } %0, 0
+ %2 = extractvalue { i1, i64 } %0, 1
+ br i1 %1, label %B, label %Flow9
+
+ L: ; preds = %Flow9
+ %idxRX = add i32 %X, %tmp
+ %gepD = getelementptr i32, ptr addrspace(1) %arg.load, i32 17, !amdgpu.uniform !0
+ %3 = load <2 x i32>, ptr addrspace(1) %gepD, align 4, !amdgpu.noclobber !0
+ %D5 = extractelement <2 x i32> %3, i32 0
+ %F6 = extractelement <2 x i32> %3, i32 1
+ %idxRY = sub i32 %X, %idxRX
+ br label %E, !amdgpu.uniform !0
+
+ B: ; preds = %S
+ %gepLI = getelementptr i32, ptr addrspace(1) %arg.load, i32 1, !amdgpu.uniform !0
+ %LI0 = load i32, ptr addrspace(1) %gepLI, align 4, !amdgpu.noclobber !0
+ %gepD1 = getelementptr i32, ptr addrspace(1) %arg.load, i32 27, !amdgpu.uniform !0
+ %4 = load <2 x i32>, ptr addrspace(1) %gepD1, align 4, !amdgpu.noclobber !0
+ %D17 = extractelement <2 x i32> %4, i32 0
+ %F18 = extractelement <2 x i32> %4, i32 1
+ %gepLB = getelementptr i32, ptr addrspace(1) %arg.load, i32 256, !amdgpu.uniform !0
+ %LB = load i32, ptr addrspace(1) %gepLB, align 4, !amdgpu.noclobber !0
+ br label %H, !amdgpu.uniform !0
+
+ Flow9: ; preds = %Flow, %S
+ %5 = phi i32 [ %idxY, %Flow ], [ undef, %S ]
+ %6 = phi i32 [ %idxX, %Flow ], [ undef, %S ]
+ %7 = phi i32 [ %F3.lcssa, %Flow ], [ undef, %S ]
+ %8 = phi i32 [ %D17, %Flow ], [ undef, %S ]
+ %9 = call { i1, i64 } @llvm.amdgcn.else.i64.i64(i64 %2)
+ %10 = extractvalue { i1, i64 } %9, 0
+ %11 = extractvalue { i1, i64 } %9, 1
+ br i1 %10, label %L, label %E
+
+ H: ; preds = %B, %H
+ %I = phi i32 [ %II, %H ], [ %LI0, %B ]
+ %F2 = phi i32 [ %F3, %H ], [ %F18, %B ]
+ %idxL = shl i32 %X, %I
+ %idxprom2 = sext i32 %idxL to i64
+ %gepL = getelementptr i32, ptr addrspace(1) %arg.load, i64 %idxprom2
+ %lval = load i32, ptr addrspace(1) %gepL, align 4
+ %F3 = add nsw i32 %lval, %F2
+ %II = add nsw i32 %I, 1
+ %cmpl = icmp sgt i32 %II, %LB
+ br i1 %cmpl, label %Flow, label %H, !amdgpu.uniform !0
+
+ Flow: ; preds = %H
+ %F3.lcssa = phi i32 [ %F3, %H ]
+ br label %Flow9, !amdgpu.uniform !0
+
+ E: ; preds = %L, %Flow9
+ %D2 = phi i32 [ %8, %Flow9 ], [ %D5, %L ]
+ %F4 = phi i32 [ %7, %Flow9 ], [ %F6, %L ]
+ %idxRX1 = phi i32 [ %6, %Flow9 ], [ %idxRX, %L ]
+ %idxRY1 = phi i32 [ %5, %Flow9 ], [ %idxRY, %L ]
+ call void @llvm.amdgcn.end.cf.i64(i64 %11)
+ %idxprom3 = sext i32 %idxRX1 to i64
+ %gepRX = getelementptr i32, ptr addrspace(1) %arg.load, i64 %idxprom3
+ %idxprom4 = sext i32 %idxRY1 to i64
+ %gepRY = getelementptr i32, ptr addrspace(1) %arg.load, i64 %idxprom4
+ %resX = mul i32 %X, %D2
+ %resY = mul i32 %Y, %F4
+ store i32 %resX, ptr addrspace(1) %gepRX, align 4
+ store i32 %resY, ptr addrspace(1) %gepRY, align 4
+ ret void
+ }
+
+ declare noundef i32 @llvm.amdgcn.workitem.id.x() #1
+
+ declare noundef align 4 ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #2
+
+ declare { i1, i64 } @llvm.amdgcn.if.i64(i1) #3
+
+ declare { i1, i64 } @llvm.amdgcn.else.i64.i64(i64) #3
+
+ declare i64 @llvm.amdgcn.if.break.i64(i1, i64) #4
+
+ declare i1 @llvm.amdgcn.loop.i64(i64) #3
+
+ declare void @llvm.amdgcn.end.cf.i64(i64) #3
+
+ attributes #0 = { nounwind "amdgpu-num-vgpr"="8" "amdgpu-wave-limiter"="true" "target-cpu"="gfx900" }
+ attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx900" }
+ attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+ attributes #3 = { nocallback nofree nounwind willreturn }
+ attributes #4 = { nocallback nofree nounwind willreturn memory(none) }
+
+ !0 = !{}
+
+...
+---
+name: test0
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+ - { id: 0, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 1, class: vgpr_32, preferred-register: '' }
+ - { id: 2, class: vgpr_32, preferred-register: '' }
+ - { id: 3, class: vgpr_32, preferred-register: '' }
+ - { id: 4, class: vgpr_32, preferred-register: '' }
+ - { id: 5, class: vgpr_32, preferred-register: '' }
+ - { id: 6, class: sreg_64, preferred-register: '' }
+ - { id: 7, class: vgpr_32, preferred-register: '' }
+ - { id: 8, class: sreg_32, preferred-register: '' }
+ - { id: 9, class: sreg_32, preferred-register: '' }
+ - { id: 10, class: vgpr_32, preferred-register: '' }
+ - { id: 11, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 12, class: sreg_32, preferred-register: '' }
+ - { id: 13, class: sreg_32, preferred-register: '' }
+ - { id: 14, class: sreg_32, preferred-register: '' }
+ - { id: 15, class: vgpr_32, preferred-register: '' }
+ - { id: 16, class: vgpr_32, preferred-register: '' }
+ - { id: 17, class: vgpr_32, preferred-register: '' }
+ - { id: 18, class: sreg_32, preferred-register: '' }
+ - { id: 19, class: sreg_64, preferred-register: '' }
+ - { id: 20, class: sreg_32, preferred-register: '%23' }
+ - { id: 21, class: vgpr_32, preferred-register: '' }
+ - { id: 22, class: vgpr_32, preferred-register: '' }
+ - { id: 23, class: sreg_32, preferred-register: '%20' }
+ - { id: 24, class: vgpr_32, preferred-register: '' }
+ - { id: 25, class: vgpr_32, preferred-register: '' }
+ - { id: 26, class: vgpr_32, preferred-register: '' }
+ - { id: 27, class: vgpr_32, preferred-register: '' }
+ - { id: 28, class: vgpr_32, preferred-register: '' }
+ - { id: 29, class: vgpr_32, preferred-register: '' }
+ - { id: 30, class: vgpr_32, preferred-register: '' }
+ - { id: 31, class: vgpr_32, preferred-register: '' }
+ - { id: 32, class: sgpr_64, preferred-register: '' }
+ - { id: 33, class: sgpr_64, preferred-register: '' }
+ - { id: 34, class: sgpr_64, preferred-register: '' }
+ - { id: 35, class: sgpr_32, preferred-register: '' }
+ - { id: 36, class: sgpr_32, preferred-register: '' }
+ - { id: 37, class: sgpr_32, preferred-register: '' }
+ - { id: 38, class: sgpr_32, preferred-register: '' }
+ - { id: 39, class: sreg_32, preferred-register: '' }
+ - { id: 40, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 41, class: sreg_32, preferred-register: '' }
+ - { id: 42, class: sreg_32, preferred-register: '' }
+ - { id: 43, class: sreg_32, preferred-register: '' }
+ - { id: 44, class: vgpr_32, preferred-register: '' }
+ - { id: 45, class: sreg_32, preferred-register: '' }
+ - { id: 46, class: vgpr_32, preferred-register: '' }
+ - { id: 47, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 48, class: sreg_64, preferred-register: '$vcc' }
+ - { id: 49, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 50, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 51, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 52, class: vgpr_32, preferred-register: '' }
+ - { id: 53, class: vgpr_32, preferred-register: '' }
+ - { id: 54, class: vgpr_32, preferred-register: '' }
+ - { id: 55, class: vreg_64, preferred-register: '' }
+ - { id: 56, class: sreg_32, preferred-register: '' }
+ - { id: 57, class: vreg_64, preferred-register: '' }
+ - { id: 58, class: vreg_64, preferred-register: '' }
+ - { id: 59, class: vgpr_32, preferred-register: '' }
+ - { id: 60, class: sreg_32, preferred-register: '' }
+ - { id: 61, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 62, class: vgpr_32, preferred-register: '' }
+ - { id: 63, class: vgpr_32, preferred-register: '' }
+ - { id: 64, class: vreg_64, preferred-register: '' }
+ - { id: 65, class: sreg_32, preferred-register: '' }
+ - { id: 66, class: vreg_64, preferred-register: '' }
+ - { id: 67, class: vreg_64, preferred-register: '' }
+ - { id: 68, class: vgpr_32, preferred-register: '' }
+ - { id: 69, class: vgpr_32, preferred-register: '' }
+ - { id: 70, class: vreg_64, preferred-register: '' }
+ - { id: 71, class: vreg_64, preferred-register: '' }
+ - { id: 72, class: vreg_64, preferred-register: '' }
+ - { id: 73, class: vgpr_32, preferred-register: '' }
+ - { id: 74, class: vgpr_32, preferred-register: '' }
+ - { id: 75, class: vgpr_32, preferred-register: '' }
+ - { id: 76, class: vgpr_32, preferred-register: '' }
+ - { id: 77, class: vgpr_32, preferred-register: '' }
+ - { id: 78, class: vgpr_32, preferred-register: '' }
+ - { id: 79, class: vgpr_32, preferred-register: '' }
+ - { id: 80, class: vgpr_32, preferred-register: '' }
+ - { id: 81, class: vgpr_32, preferred-register: '' }
+ - { id: 82, class: vgpr_32, preferred-register: '' }
+ - { id: 83, class: vgpr_32, preferred-register: '' }
+ - { id: 84, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 85, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 86, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 87, class: vgpr_32, preferred-register: '' }
+ - { id: 88, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 89, class: vgpr_32, preferred-register: '' }
+ - { id: 90, class: vgpr_32, preferred-register: '' }
+ - { id: 91, class: vgpr_32, preferred-register: '' }
+ - { id: 92, class: vgpr_32, preferred-register: '' }
+ - { id: 93, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 94, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 95, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 96, class: vgpr_32, preferred-register: '' }
+ - { id: 97, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 98, class: vgpr_32, preferred-register: '' }
+ - { id: 99, class: vgpr_32, preferred-register: '' }
+ - { id: 100, class: vgpr_32, preferred-register: '' }
+ - { id: 101, class: vgpr_32, preferred-register: '' }
+ - { id: 102, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 103, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 104, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 105, class: vgpr_32, preferred-register: '' }
+ - { id: 106, class: sreg_32_xexec_hi_and_sreg_32_xm0, preferred-register: '' }
+ - { id: 107, class: vgpr_32, preferred-register: '' }
+ - { id: 108, class: vgpr_32, preferred-register: '' }
+ - { id: 109, class: vgpr_32, preferred-register: '' }
+ - { id: 110, class: vgpr_32, preferred-register: '' }
+ - { id: 111, class: vgpr_32, preferred-register: '' }
+ - { id: 112, class: vgpr_32, preferred-register: '' }
+ - { id: 113, class: vgpr_32, preferred-register: '' }
+ - { id: 114, class: sreg_32, preferred-register: '' }
+ - { id: 115, class: vgpr_32, preferred-register: '' }
+ - { id: 116, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 117, class: vgpr_32, preferred-register: '' }
+ - { id: 118, class: vgpr_32, preferred-register: '' }
+ - { id: 119, class: vgpr_32, preferred-register: '' }
+ - { id: 120, class: vgpr_32, preferred-register: '' }
+ - { id: 121, class: vgpr_32, preferred-register: '' }
+ - { id: 122, class: sreg_64, preferred-register: '' }
+ - { id: 123, class: sreg_64, preferred-register: '' }
+ - { id: 124, class: sreg_64, preferred-register: '' }
+liveins:
+ - { reg: '$vgpr0', virtual-reg: '%29' }
+ - { reg: '$sgpr2_sgpr3', virtual-reg: '%33' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ isCalleeSavedInfoValid: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo:
+ explicitKernArgSize: 8
+ maxKernArgAlign: 8
+ ldsSize: 0
+ gdsSize: 0
+ dynLDSAlign: 1
+ isEntryFunction: true
+ isChainFunction: false
+ noSignedZerosFPMath: false
+ memoryBound: false
+ waveLimiter: true
+ hasSpilledSGPRs: false
+ hasSpilledVGPRs: false
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ frameOffsetReg: '$fp_reg'
+ stackPtrOffsetReg: '$sgpr32'
+ bytesInStackArgArea: 0
+ returnsVoid: true
+ argumentInfo:
+ dispatchPtr: { reg: '$sgpr0_sgpr1' }
+ kernargSegmentPtr: { reg: '$sgpr2_sgpr3' }
+ dispatchID: { reg: '$sgpr4_sgpr5' }
+ workGroupIDX: { reg: '$sgpr6' }
+ workGroupIDY: { reg: '$sgpr7' }
+ workGroupIDZ: { reg: '$sgpr8' }
+ privateSegmentWaveByteOffset: { reg: '$sgpr9' }
+ workItemIDX: { reg: '$vgpr0' }
+ workItemIDY: { reg: '$vgpr1' }
+ workItemIDZ: { reg: '$vgpr2' }
+ psInputAddr: 0
+ psInputEnable: 0
+ mode:
+ ieee: true
+ dx10-clamp: true
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+ highBitsOf32BitAddress: 0
+ occupancy: 8
+ vgprForAGPRCopy: ''
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+ longBranchReservedReg: ''
+ hasInitWholeWave: false
+body: |
+ ; CHECK-LABEL: name: test0
+ ; CHECK: bb.0.S:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: early-clobber %0:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: undef [[V_MUL_U32_U24_e32_:%[0-9]+]].sub0:vreg_64 = V_MUL_U32_U24_e32 19, [[COPY1]], implicit $exec
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e32_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e32 2, [[V_MUL_U32_U24_e32_]].sub0, implicit $exec
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, [[V_LSHLREV_B32_e32_]], 0, 0, implicit $exec :: (load (s32) from %ir.gepY, addrspace 1)
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e32 6, [[COPY1]], implicit $exec
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD_SADDR1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, [[V_LSHLREV_B32_e32_1]], 0, 0, implicit $exec :: (load (s32) from %ir.gepX, addrspace 1)
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 512, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepC, addrspace 1)
+ ; CHECK-NEXT: [[V_CMP_GE_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_I32_e64 [[S_LOAD_DWORD_IMM]], [[COPY1]], implicit $exec
+ ; CHECK-NEXT: undef [[DEF:%[0-9]+]].sub0:sreg_64_xexec = IMPLICIT_DEF
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: undef [[DEF2:%[0-9]+]].sub0:vreg_64 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
+ ; CHECK-NEXT: [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_GE_I32_e64_]], implicit-def dead $scc
+ ; CHECK-NEXT: [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[S_AND_B64_]], [[COPY2]], implicit-def dead $scc
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE [[V_MUL_U32_U24_e32_]].sub0, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: $exec = S_MOV_B64_term [[S_AND_B64_]]
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.3, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.L:
+ ; CHECK-NEXT: successors: %bb.6(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber %61:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 68, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD, align 4, addrspace 1)
+ ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], %140, implicit $exec
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY %61.sub0, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY %61.sub1, implicit $exec
+ ; CHECK-NEXT: [[V_SUB_U32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], [[V_ADD_U32_e32_]], implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.B:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 4, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLI, addrspace 1)
+ ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM_ec:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 108, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD1, align 4, addrspace 1)
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 1024, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLB, addrspace 1)
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e32 4, [[COPY1]], implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_LOAD_DWORDX2_IMM_ec]].sub1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.Flow9:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.6(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[V_LSHLREV_B32_e32_2]], %bb.5, [[DEF2]].sub0, %bb.0
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI %134, %bb.5, [[DEF1]], %bb.0
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_LOAD_DWORDX2_IMM_ec]].sub0, %bb.5, [[DEF]].sub0, %bb.0
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI %136, %bb.5, [[COPY1]], %bb.0
+ ; CHECK-NEXT: [[S_OR_SAVEEXEC_B64_:%[0-9]+]]:sreg_64 = S_OR_SAVEEXEC_B64 [[S_XOR_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[PHI2]], implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: $exec = S_XOR_B64_term $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.6, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.H:
+ ; CHECK-NEXT: successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:vgpr_32 = PHI [[COPY5]], %bb.2, %134, %bb.4
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_LOAD_DWORD_IMM1]], %bb.2, %135, %bb.4
+ ; CHECK-NEXT: undef [[V_LSHLREV_B32_e32_3:%[0-9]+]].sub0:vreg_64 = V_LSHLREV_B32_e32 [[PHI5]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[V_LSHLREV_B32_e32_3]].sub0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_]], %subreg.sub1, [[V_LSHLREV_B32_e32_3]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B64_e64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE]], implicit $exec
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY %0.sub1
+ ; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE [[V_MUL_U32_U24_e32_]].sub0, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: [[V_ADDC_U32_e64_:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY7]], [[V_LSHLREV_B64_e64_]].sub1, [[V_ADD_CO_U32_e64_1]], 0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_]], %subreg.sub1, [[V_ADD_CO_U32_e64_]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[REG_SEQUENCE1]], 0, 0, implicit $exec :: (load (s32) from %ir.gepL, addrspace 1)
+ ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = nsw V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD]], [[PHI4]], implicit $exec
+ ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32_xm0_xexec = nsw S_ADD_I32 [[PHI5]], 1, implicit-def dead $scc
+ ; CHECK-NEXT: S_CMP_LE_I32 [[S_ADD_I32_]], [[S_LOAD_DWORD_IMM2]], implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_SCC1 %bb.4, implicit $scc
+ ; CHECK-NEXT: S_BRANCH %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.Flow:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF3:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.E:
+ ; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.1, [[COPY6]], %bb.3
+ ; CHECK-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI [[V_SUB_U32_e32_]], %bb.1, [[SI_SPILL_V32_RESTORE]], %bb.3
+ ; CHECK-NEXT: [[PHI8:%[0-9]+]]:vgpr_32 = PHI [[V_ADD_U32_e32_]], %bb.1, [[PHI]], %bb.3
+ ; CHECK-NEXT: [[PHI9:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.1, [[PHI1]], %bb.3
+ ; CHECK-NEXT: $exec = S_OR_B64 $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $scc
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e32_1:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI8]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_1]], %subreg.sub1, [[PHI8]], %subreg.sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B64_e64_1:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE2]], implicit $exec
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %0.sub1
+ ; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_2:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_3:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_1]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[PHI9]], %stack.1, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.1, addrspace 5)
+ ; CHECK-NEXT: [[V_ADDC_U32_e64_2:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_3:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY8]], [[V_LSHLREV_B64_e64_1]].sub1, [[V_ADD_CO_U32_e64_3]], 0, implicit $exec
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e32_2:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI7]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_2]], %subreg.sub1, [[PHI7]], %subreg.sub0
+ ; CHECK-NEXT: [[V_LSHLREV_B64_e64_2:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE3]], implicit $exec
+ ; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_4:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_5:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_2]].sub0, 0, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[GLOBAL_LOAD_DWORD_SADDR]], %stack.2, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; CHECK-NEXT: [[V_ADDC_U32_e64_4:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_5:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY8]], [[V_LSHLREV_B64_e64_2]].sub1, [[V_ADD_CO_U32_e64_5]], 0, implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[PHI6]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_2]], %subreg.sub1, [[V_ADD_CO_U32_e64_2]].sub0, %subreg.sub0
+ ; CHECK-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE4]], [[V_MUL_LO_U32_e64_]], 0, 0, implicit $exec :: (store (s32) into %ir.gepRX, addrspace 1)
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE1:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE2:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.1, addrspace 5)
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[SI_SPILL_V32_RESTORE1]], [[SI_SPILL_V32_RESTORE2]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_4]], %subreg.sub1, [[V_ADD_CO_U32_e64_4]].sub0, %subreg.sub0
+ ; CHECK-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE5]], [[V_MUL_LO_U32_e64_1]], 0, 0, implicit $exec :: (store (s32) into %ir.gepRY, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0.S:
+ successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ liveins: $vgpr0, $sgpr2_sgpr3
+
+ %33:sgpr_64(p4) = COPY $sgpr2_sgpr3
+ %115:vgpr_32 = COPY $vgpr0
+ early-clobber %0:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %33(p4), 36, 0 :: (dereferenceable invariant load (s64) from %ir.arg.kernarg.offset, align 4, addrspace 4)
+ undef %70.sub0:vreg_64 = V_MUL_U32_U24_e32 19, %115, implicit $exec
+ %46:vgpr_32 = nuw nsw V_LSHLREV_B32_e32 2, %70.sub0, implicit $exec
+ %5:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %46, 0, 0, implicit $exec :: (load (s32) from %ir.gepY, addrspace 1)
+ %44:vgpr_32 = V_LSHLREV_B32_e32 6, %115, implicit $exec
+ %4:vgpr_32 = GLOBAL_LOAD_DWORD_SADDR %0, %44, 0, 0, implicit $exec :: (load (s32) from %ir.gepX, addrspace 1)
+ %47:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 512, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepC, addrspace 1)
+ %48:sreg_64 = V_CMP_GE_I32_e64 %47, %115, implicit $exec
+ undef %50.sub0:sreg_64_xexec = IMPLICIT_DEF
+ %113:vgpr_32 = IMPLICIT_DEF
+ undef %64.sub0:vreg_64 = IMPLICIT_DEF
+ %122:sreg_64 = COPY $exec, implicit-def $exec
+ %123:sreg_64 = S_AND_B64 %122, %48, implicit-def dead $scc
+ %6:sreg_64 = S_XOR_B64 %123, %122, implicit-def dead $scc
+ $exec = S_MOV_B64_term %123
+ S_CBRANCH_EXECZ %bb.3, implicit $exec
+ S_BRANCH %bb.2
+
+ bb.1.L:
+ successors: %bb.6(0x80000000)
+
+ early-clobber %61:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 68, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD, align 4, addrspace 1)
+ undef %64.sub0:vreg_64 = V_ADD_U32_e32 %4, %115, implicit $exec
+ %118:vgpr_32 = COPY %61.sub0, implicit $exec
+ %113:vgpr_32 = COPY %61.sub1, implicit $exec
+ undef %70.sub0:vreg_64 = V_SUB_U32_e32 %4, %64.sub0, implicit $exec
+ S_BRANCH %bb.6
+
+ bb.2.B:
+ successors: %bb.4(0x80000000)
+
+ %116:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 4, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLI, addrspace 1)
+ early-clobber %50:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 108, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD1, align 4, addrspace 1)
+ %51:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 1024, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLB, addrspace 1)
+ undef %64.sub0:vreg_64 = V_LSHLREV_B32_e32 4, %115, implicit $exec
+ %113:vgpr_32 = COPY %50.sub1, implicit $exec
+ S_BRANCH %bb.4
+
+ bb.3.Flow9:
+ successors: %bb.1(0x40000000), %bb.6(0x40000000)
+
+ %19:sreg_64 = S_OR_SAVEEXEC_B64 %6, implicit-def $exec, implicit-def $scc, implicit $exec
+ %118:vgpr_32 = COPY %50.sub0, implicit $exec
+ $exec = S_XOR_B64_term $exec, %19, implicit-def $scc
+ S_CBRANCH_EXECZ %bb.6, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.4.H:
+ successors: %bb.5(0x04000000), %bb.4(0x7c000000)
+
+ undef %55.sub0:vreg_64 = V_LSHLREV_B32_e32 %116, %4, implicit $exec
+ %55.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %55.sub0, implicit $exec
+ %57:vreg_64 = V_LSHLREV_B64_e64 2, %55, implicit $exec
+ %90:vgpr_32 = COPY %0.sub1
+ undef %58.sub0:vreg_64, %84:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, %57.sub0, 0, implicit $exec
+ %58.sub1:vreg_64, dead %85:sreg_64_xexec = V_ADDC_U32_e64 %90, %57.sub1, %84, 0, implicit $exec
+ %59:vgpr_32 = GLOBAL_LOAD_DWORD %58, 0, 0, implicit $exec :: (load (s32) from %ir.gepL, addrspace 1)
+ %113:vgpr_32 = nsw V_ADD_U32_e32 %59, %113, implicit $exec
+ %116:sreg_32_xm0_xexec = nsw S_ADD_I32 %116, 1, implicit-def dead $scc
+ S_CMP_LE_I32 %116, %51, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.4, implicit $scc
+ S_BRANCH %bb.5
+
+ bb.5.Flow:
+ successors: %bb.3(0x80000000)
+
+ %115:vgpr_32 = IMPLICIT_DEF
+ S_BRANCH %bb.3
+
+ bb.6.E:
+ $exec = S_OR_B64 $exec, %19, implicit-def $scc
+ %64.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %64.sub0, implicit $exec
+ %66:vreg_64 = V_LSHLREV_B64_e64 2, %64, implicit $exec
+ %99:vgpr_32 = COPY %0.sub1
+ undef %67.sub0:vreg_64, %93:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, %66.sub0, 0, implicit $exec
+ %67.sub1:vreg_64, dead %94:sreg_64_xexec = V_ADDC_U32_e64 %99, %66.sub1, %93, 0, implicit $exec
+ %70.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %70.sub0, implicit $exec
+ %71:vreg_64 = V_LSHLREV_B64_e64 2, %70, implicit $exec
+ undef %72.sub0:vreg_64, %102:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, %71.sub0, 0, implicit $exec
+ %72.sub1:vreg_64, dead %103:sreg_64_xexec = V_ADDC_U32_e64 %99, %71.sub1, %102, 0, implicit $exec
+ %73:vgpr_32 = V_MUL_LO_U32_e64 %4, %118, implicit $exec
+ GLOBAL_STORE_DWORD %67, %73, 0, 0, implicit $exec :: (store (s32) into %ir.gepRX, addrspace 1)
+ %74:vgpr_32 = V_MUL_LO_U32_e64 %5, %113, implicit $exec
+ GLOBAL_STORE_DWORD %72, %74, 0, 0, implicit $exec :: (store (s32) into %ir.gepRY, addrspace 1)
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir b/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
new file mode 100644
index 0000000000000..ee73e0dcfa377
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
@@ -0,0 +1,375 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-rebuild-ssa -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+ source_filename = "test3.ll"
+ target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
+ target triple = "amdgcn"
+
+ declare noundef i32 @llvm.amdgcn.workitem.id.x() #0
+
+ define amdgpu_kernel void @test_subregs(ptr addrspace(1) %in, ptr addrspace(1) %out, i32 %arg) #1 {
+ bb0:
+ %test_subregs.kernarg.segment = call nonnull align 16 dereferenceable(276) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
+ %in.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4) %test_subregs.kernarg.segment, i64 36, !amdgpu.uniform !0
+ %0 = load <2 x i64>, ptr addrspace(4) %in.kernarg.offset, align 4, !invariant.load !0
+ %in.load1 = extractelement <2 x i64> %0, i32 0
+ %1 = inttoptr i64 %in.load1 to ptr addrspace(1)
+ %arg.kernarg.offset = getelementptr inbounds i8, ptr addrspace(4) %test_subregs.kernarg.segment, i64 52, !amdgpu.uniform !0
+ %arg.load = load i32, ptr addrspace(4) %arg.kernarg.offset, align 4, !invariant.load !0
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %idxprom = sext i32 %tid to i64
+ %gep = getelementptr i32, ptr addrspace(1) %1, i64 %idxprom
+ %vec0 = load <4 x i32>, ptr addrspace(1) %gep, align 16
+ %cmp0 = icmp sle i32 %arg.load, 10
+ br i1 %cmp0, label %bb4, label %Flow, !amdgpu.uniform !0
+
+ Flow: ; preds = %bb4, %bb0
+ %2 = phi <4 x i32> [ %vec5, %bb4 ], [ undef, %bb0 ]
+ %3 = phi i1 [ false, %bb4 ], [ true, %bb0 ]
+ br i1 %3, label %bb1, label %Flow3, !amdgpu.uniform !0
+
+ bb1: ; preds = %Flow
+ br label %bb2, !amdgpu.uniform !0
+
+ Flow3: ; preds = %bb2, %Flow
+ %4 = phi <4 x i32> [ %vec2, %bb2 ], [ %2, %Flow ]
+ br label %bb3, !amdgpu.uniform !0
+
+ bb2: ; preds = %bb1
+ %elt0 = extractelement <4 x i32> %vec0, i32 0
+ %tmp = add i32 %elt0, %arg.load
+ %vec1 = insertelement <4 x i32> %vec0, i32 %tmp, i32 0
+ %elt1 = extractelement <4 x i32> %vec0, i32 1
+ %tmp1 = add i32 %elt1, %elt0
+ %vec2 = insertelement <4 x i32> %vec1, i32 %tmp1, i32 1
+ br label %Flow3, !amdgpu.uniform !0
+
+ bb3: ; preds = %Flow3
+ %out.load2 = extractelement <2 x i64> %0, i32 1
+ %5 = inttoptr i64 %out.load2 to ptr addrspace(1)
+ %vec3 = shufflevector <4 x i32> %4, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
+ %cast = bitcast <2 x i32> %vec3 to i64
+ %vec4 = shufflevector <4 x i32> %4, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
+ %cast1 = bitcast <2 x i32> %vec4 to i64
+ %res2 = mul i64 %cast, %cast1
+ %gep2 = getelementptr i64, ptr addrspace(1) %5, i32 22
+ store i64 %res2, ptr addrspace(1) %gep2, align 8
+ ret void
+
+ bb4: ; preds = %bb0
+ %elt2 = extractelement <4 x i32> %vec0, i32 1
+ %tmp2 = add i32 %elt2, %arg.load
+ %vec5 = insertelement <4 x i32> %vec0, i32 %tmp2, i32 2
+ br label %Flow, !amdgpu.uniform !0
+ }
+
+ declare noundef align 4 ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr() #2
+
+ declare { i1, i64 } @llvm.amdgcn.if.i64(i1) #3
+
+ declare { i1, i64 } @llvm.amdgcn.else.i64.i64(i64) #3
+
+ declare i64 @llvm.amdgcn.if.break.i64(i1, i64) #4
+
+ declare i1 @llvm.amdgcn.loop.i64(i64) #3
+
+ declare void @llvm.amdgcn.end.cf.i64(i64) #3
+
+ attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-cpu"="gfx900" }
+ attributes #1 = { "target-cpu"="gfx900" }
+ attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+ attributes #3 = { nocallback nofree nounwind willreturn }
+ attributes #4 = { nocallback nofree nounwind willreturn memory(none) }
+
+ !0 = !{}
+
+...
+---
+name: test_subregs
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHCatchret: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: false
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+ - { id: 0, class: sgpr_128, preferred-register: '' }
+ - { id: 1, class: sreg_32, preferred-register: '' }
+ - { id: 2, class: vreg_128, preferred-register: '' }
+ - { id: 3, class: vreg_128, preferred-register: '' }
+ - { id: 4, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 5, class: vreg_128, preferred-register: '' }
+ - { id: 6, class: vreg_128, preferred-register: '' }
+ - { id: 7, class: vreg_128, preferred-register: '' }
+ - { id: 8, class: vgpr_32, preferred-register: '' }
+ - { id: 9, class: vgpr_32, preferred-register: '' }
+ - { id: 10, class: vgpr_32, preferred-register: '' }
+ - { id: 11, class: sgpr_64, preferred-register: '' }
+ - { id: 12, class: sgpr_64, preferred-register: '' }
+ - { id: 13, class: sgpr_64, preferred-register: '' }
+ - { id: 14, class: sgpr_32, preferred-register: '' }
+ - { id: 15, class: sgpr_32, preferred-register: '' }
+ - { id: 16, class: sgpr_32, preferred-register: '' }
+ - { id: 17, class: sgpr_32, preferred-register: '' }
+ - { id: 18, class: sgpr_128, preferred-register: '' }
+ - { id: 19, class: sreg_64, preferred-register: '' }
+ - { id: 20, class: sreg_32_xm0_xexec, preferred-register: '' }
+ - { id: 21, class: sreg_32, preferred-register: '' }
+ - { id: 22, class: sreg_32, preferred-register: '' }
+ - { id: 23, class: sreg_64, preferred-register: '' }
+ - { id: 24, class: sreg_32, preferred-register: '' }
+ - { id: 25, class: vgpr_32, preferred-register: '' }
+ - { id: 26, class: sreg_32, preferred-register: '' }
+ - { id: 27, class: sreg_64, preferred-register: '' }
+ - { id: 28, class: vgpr_32, preferred-register: '' }
+ - { id: 29, class: vgpr_32, preferred-register: '' }
+ - { id: 30, class: vgpr_32, preferred-register: '' }
+ - { id: 31, class: sreg_32, preferred-register: '' }
+ - { id: 32, class: sreg_32, preferred-register: '' }
+ - { id: 33, class: vgpr_32, preferred-register: '' }
+ - { id: 34, class: vgpr_32, preferred-register: '' }
+ - { id: 35, class: vreg_128, preferred-register: '' }
+ - { id: 36, class: vgpr_32, preferred-register: '' }
+ - { id: 37, class: vgpr_32, preferred-register: '' }
+ - { id: 38, class: sreg_32, preferred-register: '' }
+ - { id: 39, class: sreg_32, preferred-register: '' }
+ - { id: 40, class: sreg_64, preferred-register: '' }
+ - { id: 41, class: vgpr_32, preferred-register: '' }
+ - { id: 42, class: vgpr_32, preferred-register: '' }
+ - { id: 43, class: vgpr_32, preferred-register: '' }
+ - { id: 44, class: vgpr_32, preferred-register: '' }
+ - { id: 45, class: vgpr_32, preferred-register: '' }
+ - { id: 46, class: vgpr_32, preferred-register: '' }
+ - { id: 47, class: vgpr_32, preferred-register: '' }
+ - { id: 48, class: vreg_64, preferred-register: '' }
+ - { id: 49, class: sreg_64, preferred-register: '' }
+ - { id: 50, class: vgpr_32, preferred-register: '' }
+ - { id: 51, class: vgpr_32, preferred-register: '' }
+ - { id: 52, class: vgpr_32, preferred-register: '' }
+ - { id: 53, class: sreg_64, preferred-register: '' }
+ - { id: 54, class: vreg_64, preferred-register: '' }
+ - { id: 55, class: sgpr_32, preferred-register: '' }
+ - { id: 56, class: sgpr_32, preferred-register: '' }
+ - { id: 57, class: vreg_64, preferred-register: '' }
+ - { id: 58, class: sreg_64_xexec, preferred-register: '$vcc' }
+ - { id: 59, class: vreg_128, preferred-register: '' }
+ - { id: 60, class: vreg_128, preferred-register: '' }
+ - { id: 61, class: sreg_64_xexec, preferred-register: '' }
+ - { id: 62, class: vreg_128, preferred-register: '' }
+liveins:
+ - { reg: '$vgpr0', virtual-reg: '%8' }
+ - { reg: '$sgpr2_sgpr3', virtual-reg: '%12' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 1
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ isCalleeSavedInfoValid: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo:
+ explicitKernArgSize: 20
+ maxKernArgAlign: 8
+ ldsSize: 0
+ gdsSize: 0
+ dynLDSAlign: 1
+ isEntryFunction: true
+ isChainFunction: false
+ noSignedZerosFPMath: false
+ memoryBound: false
+ waveLimiter: false
+ hasSpilledSGPRs: false
+ hasSpilledVGPRs: false
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ frameOffsetReg: '$fp_reg'
+ stackPtrOffsetReg: '$sgpr32'
+ bytesInStackArgArea: 0
+ returnsVoid: true
+ argumentInfo:
+ dispatchPtr: { reg: '$sgpr0_sgpr1' }
+ kernargSegmentPtr: { reg: '$sgpr2_sgpr3' }
+ dispatchID: { reg: '$sgpr4_sgpr5' }
+ workGroupIDX: { reg: '$sgpr6' }
+ workGroupIDY: { reg: '$sgpr7' }
+ workGroupIDZ: { reg: '$sgpr8' }
+ privateSegmentWaveByteOffset: { reg: '$sgpr9' }
+ workItemIDX: { reg: '$vgpr0' }
+ workItemIDY: { reg: '$vgpr1' }
+ workItemIDZ: { reg: '$vgpr2' }
+ psInputAddr: 0
+ psInputEnable: 0
+ mode:
+ ieee: true
+ dx10-clamp: true
+ fp32-input-denormals: true
+ fp32-output-denormals: true
+ fp64-fp16-input-denormals: true
+ fp64-fp16-output-denormals: true
+ highBitsOf32BitAddress: 0
+ occupancy: 8
+ vgprForAGPRCopy: ''
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+ longBranchReservedReg: ''
+ hasInitWholeWave: false
+body: |
+ ; CHECK-LABEL: name: test_subregs
+ ; CHECK: bb.0.bb0:
+ ; CHECK-NEXT: successors: %bb.6(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr2_sgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
+ ; CHECK-NEXT: early-clobber %0:sgpr_128 = S_LOAD_DWORDX4_IMM_ec [[COPY]](p4), 36, 0 :: (dereferenceable invariant load (s128) from %ir.in.kernarg.offset, align 4, addrspace 4)
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 52, 0 :: (dereferenceable invariant load (s32) from %ir.arg.kernarg.offset, addrspace 4)
+ ; CHECK-NEXT: KILL [[COPY]](p4)
+ ; CHECK-NEXT: [[V_LSHLREV_B32_e32_:%[0-9]+]]:vgpr_32 = nuw nsw V_LSHLREV_B32_e32 2, [[COPY1]](s32), implicit $exec
+ ; CHECK-NEXT: [[GLOBAL_LOAD_DWORDX4_SADDR:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4_SADDR %0.sub0_sub1, [[V_LSHLREV_B32_e32_]], 0, 0, implicit $exec :: (load (s128) from %ir.gep, addrspace 1)
+ ; CHECK-NEXT: S_CMP_LT_I32 [[S_LOAD_DWORD_IMM]], 11, implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_SCC1 %bb.6, implicit killed $scc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 -1
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_128 = IMPLICIT_DEF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.Flow:
+ ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %62.sub3, %bb.6, [[DEF]].sub3, %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI %62.sub0, %bb.6, [[DEF]].sub0, %bb.1
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI %63, %bb.6, [[DEF]].sub2, %bb.1
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI %62.sub1, %bb.6, [[DEF]].sub1, %bb.1
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sreg_64_xexec = PHI %61, %bb.6, [[S_MOV_B64_]], %bb.1
+ ; CHECK-NEXT: $vcc = S_ANDN2_B64 $exec, [[PHI4]], implicit-def dead $scc
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.Flow3:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI %72.sub3, %bb.4, [[PHI]], %bb.2
+ ; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr_32 = PHI %72.sub0, %bb.4, [[PHI1]], %bb.2
+ ; CHECK-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI %72.sub2, %bb.4, [[PHI2]], %bb.2
+ ; CHECK-NEXT: [[PHI8:%[0-9]+]]:vgpr_32 = PHI %72.sub1, %bb.4, [[PHI3]], %bb.2
+ ; CHECK-NEXT: S_BRANCH %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[S_LOAD_DWORD_IMM]], [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[GLOBAL_LOAD_DWORDX4_SADDR]].sub1, [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0, implicit $exec
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e32_]]
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[V_ADD_U32_e32_1]], %subreg.sub1, [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3, %subreg.sub2_sub3
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5.bb3:
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI8]], [[PHI7]], implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI6]], [[PHI5]], implicit $exec
+ ; CHECK-NEXT: [[V_MAD_U64_U32_e64_:%[0-9]+]]:vreg_64, dead [[V_MAD_U64_U32_e64_1:%[0-9]+]]:sreg_64 = V_MAD_U64_U32_e64 [[PHI6]], [[PHI7]], 0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 [[V_MAD_U64_U32_e64_]].sub1, [[V_MUL_LO_U32_e64_1]], [[V_MUL_LO_U32_e64_]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD3_U32_e64_]], %subreg.sub1, [[V_MAD_U64_U32_e64_]].sub0, %subreg.sub0
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[REG_SEQUENCE1]], %0.sub2_sub3, 176, 0, implicit $exec :: (store (s64) into %ir.gep2, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6.bb4:
+ ; CHECK-NEXT: successors: %bb.2(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_ADD_U32_e32_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[S_LOAD_DWORD_IMM]], [[GLOBAL_LOAD_DWORDX4_SADDR]].sub1, implicit $exec
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vreg_128 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e32_2]]
+ ; CHECK-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 0
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ bb.0.bb0:
+ successors: %bb.6(0x40000000), %bb.1(0x40000000)
+ liveins: $vgpr0, $sgpr2_sgpr3
+
+ %12:sgpr_64(p4) = COPY $sgpr2_sgpr3
+ %8:vgpr_32(s32) = COPY $vgpr0
+ early-clobber %0:sgpr_128 = S_LOAD_DWORDX4_IMM_ec %12(p4), 36, 0 :: (dereferenceable invariant load (s128) from %ir.in.kernarg.offset, align 4, addrspace 4)
+ %20:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %12(p4), 52, 0 :: (dereferenceable invariant load (s32) from %ir.arg.kernarg.offset, addrspace 4)
+ KILL %12(p4)
+ %25:vgpr_32 = nuw nsw V_LSHLREV_B32_e32 2, %8(s32), implicit $exec
+ %2:vreg_128 = GLOBAL_LOAD_DWORDX4_SADDR %0.sub0_sub1, %25, 0, 0, implicit $exec :: (load (s128) from %ir.gep, addrspace 1)
+ S_CMP_LT_I32 %20, 11, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.6, implicit killed $scc
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ %61:sreg_64_xexec = S_MOV_B64 -1
+ %62:vreg_128 = IMPLICIT_DEF
+
+ bb.2.Flow:
+ successors: %bb.4(0x40000000), %bb.3(0x40000000)
+
+ $vcc = S_ANDN2_B64 $exec, %61, implicit-def dead $scc
+ S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ S_BRANCH %bb.4
+
+ bb.3.Flow3:
+ successors: %bb.5(0x80000000)
+
+ S_BRANCH %bb.5
+
+ bb.4.bb2:
+ successors: %bb.3(0x80000000)
+
+ %34:vgpr_32 = V_ADD_U32_e32 %20, %2.sub0, implicit $exec
+ %2.sub1:vreg_128 = V_ADD_U32_e32 %2.sub1, %2.sub0, implicit $exec
+ %2.sub0:vreg_128 = COPY %34
+ %62:vreg_128 = COPY %2
+ S_BRANCH %bb.3
+
+ bb.5.bb3:
+ %46:vgpr_32 = V_MUL_LO_U32_e64 %62.sub1, %62.sub2, implicit $exec
+ %47:vgpr_32 = V_MUL_LO_U32_e64 %62.sub0, %62.sub3, implicit $exec
+ %57:vreg_64, dead %49:sreg_64 = V_MAD_U64_U32_e64 %62.sub0, %62.sub2, 0, 0, implicit $exec
+ %41:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ %57.sub1:vreg_64 = V_ADD3_U32_e64 %57.sub1, %47, %46, implicit $exec
+ GLOBAL_STORE_DWORDX2_SADDR %41, %57, %0.sub2_sub3, 176, 0, implicit $exec :: (store (s64) into %ir.gep2, addrspace 1)
+ S_ENDPGM 0
+
+ bb.6.bb4:
+ successors: %bb.2(0x80000000)
+
+ %29:vgpr_32 = V_ADD_U32_e32 %20, %2.sub1, implicit $exec
+ %62:vreg_128 = COPY %2
+ %62.sub2:vreg_128 = COPY %29
+ %61:sreg_64_xexec = S_MOV_B64 0
+ S_BRANCH %bb.2
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/test_spill.mir b/llvm/test/CodeGen/AMDGPU/SSARA/test_spill.mir
new file mode 100644
index 0000000000000..03e8a8509ff77
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/test_spill.mir
@@ -0,0 +1,127 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-ssa-spiller %s -verify-machineinstrs -o - | FileCheck %s
+
+--- |
+ target triple = "amdgcn-amd-amdhsa"
+ target datalayout = "e-p:64:64"
+
+ define amdgpu_kernel void @test_full_register_spill() #0 {
+ entry:
+ ret void
+ }
+
+ attributes #0 = {
+ nounwind "amdgpu-num-vgpr"="8" "target-cpu"="gfx900"
+ }
+
+...
+---
+name: test_full_register_spill
+tracksRegLiveness: true
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+
+machineFunctionInfo:
+ explicitKernArgSize: 8
+ maxKernArgAlign: 8
+ ldsSize: 0
+ gdsSize: 0
+ dynLDSAlign: 1
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ frameOffsetReg: '$fp_reg'
+ stackPtrOffsetReg: '$sgpr32'
+ bytesInStackArgArea: 0
+ returnsVoid: true
+ argumentInfo:
+ kernargSegmentPtr: { reg: '$sgpr2_sgpr3' }
+body: |
+ ; CHECK-LABEL: name: test_full_register_spill
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0, $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5_sgpr6_sgpr7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %idx:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: dead %soffset:sgpr_64 = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: %rsrc:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_3:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_4:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4, implicit $exec
+ ; CHECK-NEXT: dead [[V_MOV_B32_e32_5:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 5, implicit $exec
+ ; CHECK-NEXT: dead [[V_MOV_B32_e32_6:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 6, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_7:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 7, implicit $exec
+ ; CHECK-NEXT: dead %offset:vgpr_32 = V_LSHLREV_B32_e32 4, %idx, implicit $exec
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX4_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %rsrc, 0, 64, 0, 0, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[V_MOV_B32_e32_7]], %stack.0, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.0, addrspace 5)
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[V_MOV_B32_e32_4]], %stack.1, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.1, addrspace 5)
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[V_MOV_B32_e32_2]], %stack.2, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
+ ; CHECK-NEXT: %cmp:sreg_64 = V_CMP_NE_U32_e64 %idx, [[V_MOV_B32_e32_3]], implicit $exec
+ ; CHECK-NEXT: %exec_mask:sreg_64 = S_AND_B64 $exec, %cmp, implicit-def $exec, implicit-def $scc
+ ; CHECK-NEXT: $exec = COPY %exec_mask
+ ; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE1:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.1, addrspace 5)
+ ; CHECK-NEXT: dead [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[SI_SPILL_V32_RESTORE]], [[SI_SPILL_V32_RESTORE1]], implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_OFFSET]].sub1, implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE2:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.0, addrspace 5)
+ ; CHECK-NEXT: dead [[V_ADD_U32_e32_2:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[COPY]], [[SI_SPILL_V32_RESTORE2]], implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: $exec = S_OR_B64 $exec, %cmp, implicit-def $scc
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ liveins: $vgpr0, $sgpr0_sgpr1, $sgpr2_sgpr3, $sgpr4_sgpr5_sgpr6_sgpr7
+
+ %idx:vgpr_32 = COPY $vgpr0
+ %soffset:sgpr_64 = COPY $sgpr2_sgpr3
+ %rsrc:sgpr_128 = COPY $sgpr4_sgpr5_sgpr6_sgpr7
+
+ %0:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ %2:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ %3:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+ %4:vgpr_32 = V_MOV_B32_e32 4, implicit $exec
+ %5:vgpr_32 = V_MOV_B32_e32 5, implicit $exec
+ %6:vgpr_32 = V_MOV_B32_e32 6, implicit $exec
+ %7:vgpr_32 = V_MOV_B32_e32 7, implicit $exec
+
+ %offset:vgpr_32 = V_LSHLREV_B32_e32 4, %idx, implicit $exec
+
+ %8:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET %rsrc, 0, 64, 0, 0, implicit $exec
+
+ %cmp:sreg_64 = V_CMP_NE_U32_e64 %idx, %3, implicit $exec
+ %exec_mask:sreg_64 = S_AND_B64 $exec, %cmp, implicit-def $exec, implicit-def $scc
+ $exec = COPY %exec_mask
+
+
+ S_CBRANCH_EXECZ %bb.2, implicit $exec
+ S_BRANCH %bb.1
+
+ bb.1:
+ %9:vgpr_32 = V_ADD_U32_e32 %0, %1, implicit $exec
+ %10:vgpr_32 = V_ADD_U32_e32 %2, %4, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.2:
+ %11:vgpr_32 = COPY %8.sub1, implicit $exec
+ %12:vgpr_32 = V_ADD_U32_e32 %11, %7, implicit $exec
+
+ bb.3:
+ $exec = S_OR_B64 $exec, %cmp, implicit-def $scc
+ S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/test_spill_subregs.mir b/llvm/test/CodeGen/AMDGPU/SSARA/test_spill_subregs.mir
new file mode 100644
index 0000000000000..bb333a46e8655
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/test_spill_subregs.mir
@@ -0,0 +1,144 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-ssa-spiller %s -verify-machineinstrs -o - | FileCheck %s
+
+--- |
+ define amdgpu_kernel void @test_subreg_spill() #0 {
+ entry:
+ ret void
+ }
+
+ attributes #0 = {
+ nounwind "amdgpu-num-vgpr"="8" "target-cpu"="gfx900"
+ }
+...
+
+---
+name: test_subreg_spill
+tracksRegLiveness: true
+fixedStack: []
+stack: []
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+
+machineFunctionInfo:
+ explicitKernArgSize: 8
+ maxKernArgAlign: 8
+ ldsSize: 0
+ gdsSize: 0
+ dynLDSAlign: 1
+
+ scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
+ frameOffsetReg: '$fp_reg'
+ stackPtrOffsetReg: '$sgpr32'
+
+ bytesInStackArgArea: 0
+ returnsVoid: true
+ argumentInfo:
+ kernargSegmentPtr: { reg: '$sgpr2_sgpr3' }
+
+body: |
+ ; CHECK-LABEL: name: test_subreg_spill
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %idx:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: %a:vgpr_32 = V_MOV_B32_e32 10, implicit $exec
+ ; CHECK-NEXT: %b:vgpr_32 = V_MOV_B32_e32 20, implicit $exec
+ ; CHECK-NEXT: %c:vgpr_32 = V_MOV_B32_e32 30, implicit $exec
+ ; CHECK-NEXT: %d:vgpr_32 = V_MOV_B32_e32 40, implicit $exec
+ ; CHECK-NEXT: %large:vreg_128 = REG_SEQUENCE %a, %subreg.sub0, %b, %subreg.sub1, %c, %subreg.sub2, %d, %subreg.sub3
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_3:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE %large.sub2, %stack.0, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: [[V_MOV_B32_e32_4:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 5, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE %large.sub1, %stack.1, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.1, align 4, addrspace 5)
+ ; CHECK-NEXT: [[V_MOV_B32_e32_5:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 6, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE %large.sub0, %stack.2, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.2, align 4, addrspace 5)
+ ; CHECK-NEXT: [[V_MOV_B32_e32_6:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 7, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_7:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 8, implicit $exec
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[V_MOV_B32_e32_6]], %stack.3, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.3, addrspace 5)
+ ; CHECK-NEXT: %cmp:sreg_64 = V_CMP_NE_U32_e64 %idx, [[V_MOV_B32_e32_3]], implicit $exec
+ ; CHECK-NEXT: $vcc = COPY %cmp
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %use1:vgpr_32 = V_ADD_U32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_1]], implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: %s2:vgpr_32 = COPY [[SI_SPILL_V32_RESTORE]], implicit $exec
+ ; CHECK-NEXT: %r1:vgpr_32 = V_ADD_U32_e32 %s2, %use1, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %use2:vgpr_32 = V_ADD_U32_e32 [[V_MOV_B32_e32_2]], [[V_MOV_B32_e32_4]], implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE1:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.1, align 4, addrspace 5)
+ ; CHECK-NEXT: %s1:vgpr_32 = COPY [[SI_SPILL_V32_RESTORE1]], implicit $exec
+ ; CHECK-NEXT: %r2:vgpr_32 = V_ADD_U32_e32 %s1, %use2, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: %phi_r:vgpr_32 = PHI %r1, %bb.1, %r2, %bb.2
+ ; CHECK-NEXT: %phi_use:vgpr_32 = PHI %use1, %bb.1, %use2, %bb.2
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE2:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5)
+ ; CHECK-NEXT: %s0:vgpr_32 = COPY [[SI_SPILL_V32_RESTORE2]], implicit $exec
+ ; CHECK-NEXT: dead %final:vgpr_32 = V_ADD_U32_e32 %phi_r, %s0, implicit $exec
+ ; CHECK-NEXT: [[SI_SPILL_V32_RESTORE3:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.3, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.3, addrspace 5)
+ ; CHECK-NEXT: dead %u1:vgpr_32 = V_ADD_U32_e32 [[V_MOV_B32_e32_5]], [[SI_SPILL_V32_RESTORE3]], implicit $exec
+ ; CHECK-NEXT: dead %u2:vgpr_32 = V_ADD_U32_e32 [[V_MOV_B32_e32_7]], %phi_use, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000); %bb.1(50.00%), %bb.2(50.00%)
+ liveins: $vgpr0
+ %idx:vgpr_32 = COPY $vgpr0
+ %a:vgpr_32 = V_MOV_B32_e32 10, implicit $exec
+ %b:vgpr_32 = V_MOV_B32_e32 20, implicit $exec
+ %c:vgpr_32 = V_MOV_B32_e32 30, implicit $exec
+ %d:vgpr_32 = V_MOV_B32_e32 40, implicit $exec
+ %large:vreg_128 = REG_SEQUENCE %a:vgpr_32, %subreg.sub0, %b:vgpr_32, %subreg.sub1, %c:vgpr_32, %subreg.sub2, %d:vgpr_32, %subreg.sub3
+ %6:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+ %7:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+ %8:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+ %9:vgpr_32 = V_MOV_B32_e32 4, implicit $exec
+ %10:vgpr_32 = V_MOV_B32_e32 5, implicit $exec
+ %11:vgpr_32 = V_MOV_B32_e32 6, implicit $exec
+ %12:vgpr_32 = V_MOV_B32_e32 7, implicit $exec
+ %13:vgpr_32 = V_MOV_B32_e32 8, implicit $exec
+ %cmp:sreg_64 = V_CMP_NE_U32_e64 %idx:vgpr_32, %9:vgpr_32, implicit $exec
+ $vcc = COPY %cmp:sreg_64
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.1:
+ successors: %bb.3(0x80000000)
+ %use1:vgpr_32 = V_ADD_U32_e32 %6, %7, implicit $exec
+ %s2:vgpr_32 = COPY %large.sub2, implicit $exec
+ %r1:vgpr_32 = V_ADD_U32_e32 %s2, %use1, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.3(0x80000000)
+ %use2:vgpr_32 = V_ADD_U32_e32 %8, %10, implicit $exec
+ %s1:vgpr_32 = COPY %large.sub1, implicit $exec
+ %r2:vgpr_32 = V_ADD_U32_e32 %s1, %use2, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.3:
+ %phi_r:vgpr_32 = PHI %r1, %bb.1, %r2, %bb.2
+ %phi_use:vgpr_32 = PHI %use1, %bb.1, %use2, %bb.2
+ %s0:vgpr_32 = COPY %large.sub0, implicit $exec
+ %final:vgpr_32 = V_ADD_U32_e32 %phi_r, %s0, implicit $exec
+ %u1:vgpr_32 = V_ADD_U32_e32 %11, %12, implicit $exec
+ %u2:vgpr_32 = V_ADD_U32_e32 %13, %phi_use, implicit $exec
+ S_ENDPGM 0
+
+
+
>From 6cecfcbe15df8509c76ea22db07a5bbe6fb93ded Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Mon, 11 Aug 2025 14:33:46 +0000
Subject: [PATCH 43/46] Rebuild SSA using LIS. 1st Edition.
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 425 +++++++++++++++---
.../AMDGPU/SSARA/if_loop_with_subregs.mir | 52 +--
.../AMDGPU/SSARA/test_rebuild_ssa_subregs.mir | 25 +-
3 files changed, 392 insertions(+), 110 deletions(-)
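
For orientation, the per-register flow that this patch introduces in
runOnMachineFunction can be summarized by the sketch below. This is a condensed
rendering for review only, not the literal patch text: it reuses names from the
diff (LIS, MDT, buildRealPHI, splitNonPhiValue), assumes it runs as a member of
the pass so those analyses are available, and omits the lane-mask bookkeeping,
debug printing and the subrange special cases.

    // Sketch: restore SSA for one virtual register that carries several
    // values (VNInfos) after spilling destroyed the single-def property.
    void processVReg(Register VReg) {
      LiveInterval &LI = LIS->getInterval(VReg);
      if (LI.getNumValNums() < 2)
        return; // a single definition is already SSA

      // Collect the live values and order them so dominating defs come first
      // (dominator-tree preorder, tie-broken by the def SlotIndex).
      SmallVector<VNInfo *, 8> Work;
      for (VNInfo *V : LI.vnis())
        if (V && !V->isUnused())
          Work.push_back(V);
      DenseMap<MachineBasicBlock *, unsigned> Order;
      unsigned N = 0;
      for (auto *Node : depth_first(MDT->getRootNode()))
        Order[Node->getBlock()] = N++;
      llvm::sort(Work, [&](VNInfo *A, VNInfo *B) {
        auto Key = [&](VNInfo *V) {
          return std::pair(Order.lookup(LIS->getMBBFromIndex(V->def)), V->def);
        };
        return Key(A) < Key(B);
      });

      // The first value dominates the rest and keeps the original register.
      VNInfo *Root = Work.front();

      // Phase A: every merge point (PHI-def value) becomes a real PHI
      // instruction; processed bottom-up so the incoming operands still
      // read the old register.
      auto IsPhi = [&](VNInfo *V) { return V != Root && V->isPHIDef(); };
      auto Mid = std::stable_partition(Work.begin(), Work.end(), IsPhi);
      for (auto It = std::make_reverse_iterator(Mid); It != Work.rend(); ++It)
        buildRealPHI(*It, LI, VReg);

      // Phase B: each remaining non-PHI value gets a fresh virtual register
      // and its dominated uses are rewritten to it.
      for (VNInfo *V : llvm::make_range(Mid, Work.end()))
        if (V != Root)
          splitNonPhiValue(V, LI, VReg);

      LI.RenumberValues();
    }
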
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index 7f280a43d4cb2..ef6cc1d38253b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -1,17 +1,21 @@
#include "AMDGPU.h"
+#include "AMDGPUSSARAUtils.h"
#include "GCNSubtarget.h"
+#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Support/GenericIteratedDominanceFrontier.h"
-#include "AMDGPUSSARAUtils.h"
+#include <algorithm>
#include <stack>
#include "VRegMaskPair.h"
@@ -28,6 +32,18 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
const SIInstrInfo *TII;
const SIRegisterInfo *TRI;
MachineRegisterInfo *MRI;
+ MachineLoopInfo *MLI;
+
+ DenseMap<MachineOperand *, std::pair<MachineInstr *, LaneBitmask>>
+ RegSeqences;
+
+ void buildRealPHI(VNInfo *VNI, LiveInterval &LI,
+ Register OldVR);
+ void splitNonPhiValue(VNInfo *VNI,
+ LiveInterval &LI, Register OldVR);
+ void rewriteUses(MachineInstr *DefMI, Register OldVR,
+ LaneBitmask MaskToRewrite, Register NewVR, LiveInterval &LI,
+ VNInfo *VNI);
typedef struct {
Register CurName;
@@ -322,6 +338,7 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredTransitiveID(MachineDominatorsID);
AU.addPreservedID(MachineDominatorsID);
+ AU.addRequired<MachineLoopInfoWrapperPass>();
AU.addRequired<LiveIntervalsWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -362,12 +379,205 @@ void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
}
}
+void AMDGPURebuildSSALegacy::buildRealPHI(VNInfo *VNI, LiveInterval &LI,
+ Register OldVR) {
+ MachineBasicBlock *DefMBB = LIS->getMBBFromIndex(VNI->def);
+ SmallVector<MachineOperand> Ops;
+ LaneBitmask CurrMask = LaneBitmask::getNone();
+ LaneBitmask PredMask = LaneBitmask::getNone();
+ LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(OldVR);
+ unsigned SubRegIdx = AMDGPU::NoRegister;
+ dbgs() << "\nBuild PHI for register: " << printReg(OldVR) << "\n";
+ for (auto Pred : DefMBB->predecessors()) {
+ dbgs() << "Pred: MBB_" << Pred->getNumber() << "\n";
+ SlotIndex LastPredIdx = LIS->getMBBEndIdx(Pred);
+
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+      // Does this sub-range have a value live out of this predecessor?
+ if (auto V = SR.getVNInfoBefore(LastPredIdx)) {
+ PredMask |= SR.LaneMask; // this lane mask is live-out of Pred
+ dbgs() << "Mask : " << PrintLaneMask(SR.LaneMask) << " VNINfo: " << V
+ << " id: " << V->id << "Def: " << V->def << "\n";
+ }
+ }
+
+ if (!PredMask.none() && (FullMask & ~PredMask).any()) {
+ // Not all lanes are merged here
+ dbgs() << "Partial register merge\n";
+ dbgs() << "PredMask: " << PrintLaneMask(PredMask) << "\n";
+ SubRegIdx = getSubRegIndexForLaneMask(PredMask, TRI);
+ } else {
+ // Full register merge
+ dbgs() << "Full register merge\n";
+ if (PredMask.none()) {
+ dbgs() << "No sub-ranges\n";
+ } else {
+ dbgs() << "All sub-ranges are merging. PredMask: "
+ << PrintLaneMask(PredMask) << "\n";
+ }
+ }
+ assert(CurrMask.none() || (CurrMask == PredMask));
+ CurrMask = PredMask;
+
+ Ops.push_back(
+ MachineOperand::CreateReg(OldVR, 0, 0, 0, 0, 0, 0, SubRegIdx));
+ Ops.push_back(MachineOperand::CreateMBB(Pred));
+ }
+
+ const TargetRegisterClass *RC =
+ TRI->getRegClassForOperandReg(*MRI, Ops.front());
+
+ Register DestReg =
+ MRI->createVirtualRegister(RC);
+
+ auto PHINode = BuildMI(*DefMBB, DefMBB->begin(), DebugLoc(),
+ TII->get(TargetOpcode::PHI), DestReg)
+ .add(ArrayRef(Ops));
+
+ MachineInstr *PHI = PHINode.getInstr();
+ LIS->InsertMachineInstrInMaps(*PHI);
+
+ rewriteUses(PHI, OldVR, CurrMask.none() ? FullMask : CurrMask, DestReg, LI,
+ VNI);
+ LIS->createAndComputeVirtRegInterval(DestReg);
+}
+
+void AMDGPURebuildSSALegacy::splitNonPhiValue(VNInfo *VNI, LiveInterval &LI,
+ Register OldVR) {
+ MachineInstr *DefMI = LIS->getInstructionFromIndex(VNI->def);
+ int DefIdx = DefMI->findRegisterDefOperandIdx(OldVR, TRI, false, true);
+ MachineOperand &MO = DefMI->getOperand(DefIdx);
+ unsigned SubRegIdx = MO.getSubReg();
+ LaneBitmask Mask = SubRegIdx ? TRI->getSubRegIndexLaneMask(SubRegIdx)
+ : MRI->getMaxLaneMaskForVReg(MO.getReg());
+ const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, MO);
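+  // Give this definition its own virtual register and turn it into a plain
+  // full def: the subreg index and the undef flag only made sense while the
+  // operand still wrote into the shared OldVR.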
+ Register NewVR = MRI->createVirtualRegister(RC);
+ MO.setReg(NewVR);
+ MO.setSubReg(AMDGPU::NoRegister);
+ MO.setIsUndef(false);
+ LIS->ReplaceMachineInstrInMaps(*DefMI, *DefMI);
+ rewriteUses(DefMI, OldVR, Mask, NewVR, LI, VNI);
+
+ LIS->createAndComputeVirtRegInterval(NewVR);
+}
+
+void AMDGPURebuildSSALegacy::rewriteUses(MachineInstr *DefMI, Register OldVR,
+ LaneBitmask MaskToRewrite, Register NewVR,
+ LiveInterval &LI, VNInfo *VNI) {
+ for (MachineOperand &MO :
+ llvm::make_early_inc_range(MRI->use_operands(OldVR))) {
+ MachineInstr *UseMI = MO.getParent();
+ if (DefMI == UseMI)
+ continue;
+ SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);
+
+ if (UseMI->getParent() == DefMI->getParent()) {
+ SlotIndex DefIdx = LIS->getInstructionIndex(*DefMI);
+
+ if (DefIdx >= UseIdx) {
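+        // A use that textually precedes the def in the same block can still
+        // receive this value: a loop-header PHI reads it over the back edge.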
+ if (MLI->isLoopHeader(UseMI->getParent()) && UseMI->isPHI()) {
+ unsigned OpIdx = UseMI->getOperandNo(&MO);
+ MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
+ SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
+ VNInfo *InV = LI.getVNInfoBefore(PredEnd);
+
+ if (InV != VNI)
+ continue;
+ } else
+ continue;
+ }
+ } else {
+ if (UseMI->isPHI()) {
+ unsigned OpIdx = UseMI->getOperandNo(&MO);
+ MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
+ SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
+ VNInfo *InV = LI.getVNInfoBefore(PredEnd);
+
+ if (InV != VNI)
+ continue;
+ } else if (!MDT->dominates(DefMI->getParent(), UseMI->getParent()))
+ continue;
+ }
+ const TargetRegisterClass *NewRC = TRI->getRegClassForReg(*MRI, NewVR);
+ const TargetRegisterClass *OpRC = TRI->getRegClassForOperandReg(*MRI, MO);
+ LaneBitmask OpMask = MRI->getMaxLaneMaskForVReg(MO.getReg());
+ if (MO.getSubReg()) {
+ OpMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
+ }
+ if ((OpMask & MaskToRewrite).none())
+ continue;
+ if (isOfRegClass(getRegSubRegPair(MO), *NewRC, *MRI) &&
+ OpMask == MaskToRewrite) {
+ MO.setReg(NewVR);
+ MO.setSubReg(AMDGPU::NoRegister);
+ } else {
+ if ((OpMask & ~MaskToRewrite).any()) {
+ // super-register use
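+        // Reassemble the wide value with a REG_SEQUENCE: the rewritten lanes
+        // read NewVR, the untouched lanes keep reading OldVR.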
+ LaneBitmask Mask = LaneBitmask::getNone();
+ // We need to explicitly inform LIS that the subreg is live up to the
+ // REG_SEQUENCE
+ LaneBitmask SubRangeToExtend = LaneBitmask::getNone();
+ Register DestReg = MRI->createVirtualRegister(OpRC);
+ MachineBasicBlock::iterator IP(UseMI);
+ if (UseMI->isPHI()) {
+ unsigned OpIdx = UseMI->getOperandNo(&MO);
+ MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
+ IP = Pred->getFirstTerminator();
+ }
+ auto RS = BuildMI(*IP->getParent(), IP, IP->getDebugLoc(),
+ TII->get(TargetOpcode::REG_SEQUENCE), DestReg);
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+          // Is this sub-range live at the use?
+          if (SR.getVNInfoAt(UseIdx)) {
+            Mask = SR.LaneMask; // lane mask that is live at the use
+ dbgs() << PrintLaneMask(Mask) << "\n";
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(Mask, TRI);
+ if (Mask == MaskToRewrite)
+ RS.addReg(NewVR).addImm(SubRegIdx);
+ else {
+ RS.addReg(OldVR, 0, SubRegIdx).addImm(SubRegIdx);
+                // We only save the mask for the sub-registers that have not
+                // been rewritten; for the rewritten ones we call
+                // createAndComputeVirtRegInterval afterwards.
+ SubRangeToExtend = SR.LaneMask;
+ }
+ }
+ }
+ auto RSIdx = LIS->InsertMachineInstrInMaps(*RS);
+ LIS->extendToIndices(LI, ArrayRef(RSIdx));
+ for (auto &SR : LI.subranges()) {
+ if (SR.LaneMask == SubRangeToExtend)
+ LIS->extendToIndices(SR, ArrayRef(RSIdx));
+ }
+ MO.setReg(RS->getOperand(0).getReg());
+ } else if ((OpMask & MaskToRewrite) == OpMask) {
+ // sub-register use
+ if (UseMI->isPHI()) {
+ unsigned OpIdx = UseMI->getOperandNo(&MO);
+ MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
+ SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
+ VNInfo *InV = LI.getVNInfoBefore(PredEnd);
+
+ if (InV != VNI)
+ continue;
+ }
+ unsigned SubRegIdx = MO.getSubReg();
+ assert(SubRegIdx != AMDGPU::NoRegister &&
+ "Sub-register must not be zero");
+ MO.setReg(NewVR);
+ MO.setSubReg(SubRegIdx);
+ }
+ }
+ }
+}
+
bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
TII = MF.getSubtarget<GCNSubtarget>().getInstrInfo();
MRI = &MF.getRegInfo();
TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
+ MLI = &getAnalysis<MachineLoopInfoWrapperPass>().getLI();
if (MRI->isSSA())
return false;
@@ -380,83 +590,159 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
Renamed.clear();
Visited.clear();
- // Collect all cross-block virtual registers.
- // This includes registers that are live-in to the function, and registers
- // that are defined in multiple blocks.
- // We will insert PHI nodes for these registers.
- collectCrossBlockVRegs(MF);
-
- LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
- for (auto VMP : CrossBlockVRegs) { dbgs() << printVMP(VMP) << " "; });
-
- for (auto VMP : CrossBlockVRegs) {
- SmallVector<MachineBasicBlock *> PHIBlocks;
- LiveInterval &LI = LIS->getInterval(VMP.getVReg());
- if (LI.hasSubRanges()) {
- for (const LiveInterval::SubRange &SR : LI.subranges()) {
- LaneBitmask Mask = SR.LaneMask;
- if ((Mask & VMP.getLaneMask()) == VMP.getLaneMask()) {
- for (auto &MBB : MF) {
- if (SR.liveAt(LIS->getMBBStartIdx(&MBB)))
- LiveInBlocks[VMP].insert(&MBB);
+ DenseSet<Register> Processed;
+
+ for (auto &B : MF) {
+ for (auto &I : B) {
+ for (auto Def : I.defs()) {
+ if (Def.isReg() && Def.getReg().isVirtual()) {
+ Register VReg = Def.getReg();
+ if (!LIS->hasInterval(VReg) || !Processed.insert(VReg).second)
+ continue;
+ auto &LI = LIS->getInterval(VReg);
+ if (LI.getNumValNums() == 1)
+ continue;
+
+ SmallVector<VNInfo *, 8> WorkList;
+ for (VNInfo *V : LI.vnis())
+ // for (const LiveInterval::SubRange &SR : LI.subranges())
+ // for (auto V : SR.vnis())
+ if (V && !V->isUnused())
+ WorkList.push_back(V);
+
+ auto DomKey = [&](VNInfo *V) {
+ MachineBasicBlock *BB = LIS->getMBBFromIndex(V->def);
+ // DomTree preorder index (DFS number) – cheaper than repeated
+ // dominates()
+ static DenseMap<MachineBasicBlock *, unsigned> Num;
+ if (Num.empty()) {
+ unsigned N = 0;
+ for (auto *Node : depth_first(MDT->getRootNode()))
+ Num[Node->getBlock()] = N++;
+ }
+ return std::pair{Num[BB], V->def}; // tie-break with SlotIndex
+ };
+
+ llvm::sort(WorkList, [&](VNInfo *A, VNInfo *B) {
+ return DomKey(A) < DomKey(B); // strict weak order
+ });
+
+ for (auto V : WorkList) {
+ dbgs() << "id: " << V->id << " Def: " << V->def
+ << " isPHI: " << V->isPHIDef() << "\n";
+ }
+
+
+              // --- the root is now WorkList[0] ---
+ VNInfo *Root = WorkList.front(); // dominator of all others
+ // 2. stable-partition: PHIs (except root) to the front
+ auto IsPhi = [&](VNInfo *V) { return V != Root && V->isPHIDef(); };
+ auto Mid =
+ std::stable_partition(WorkList.begin(), WorkList.end(), IsPhi);
+
+ // 3. Phase A: build real PHIs, leave incoming defs unchanged
+ auto PHISlice =
+ llvm::ArrayRef(WorkList).take_front(Mid - WorkList.begin());
+ for (auto It = PHISlice.rbegin(); It != PHISlice.rend(); ++It) {
+                // Insert PHIs in reverse dominance order, most deeply
+                // dominated merge points first
+ buildRealPHI(*It, LI, VReg);
}
- }
- }
- } else {
- for (auto &MBB : MF) {
- if (LI.liveAt(LIS->getMBBStartIdx(&MBB)))
- LiveInBlocks[VMP].insert(&MBB);
- }
- }
- SmallPtrSet<MachineBasicBlock *, 8> Defs;
- for(auto E : DefBlocks) {
- auto V = E.first;
- if (V.getVReg() == VMP.getVReg()) {
- if ((V.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask()) {
- Defs.insert(E.second.begin(), E.second.end());
- }
- }
- }
+ // 4. Phase B: split the remaining VNIs
+ for (VNInfo *VNI : llvm::ArrayRef(WorkList).slice(Mid - WorkList.begin())) {
+ if (VNI == Root)
+ continue; // never touch the dominating root
+ splitNonPhiValue(VNI, LI, VReg);
+ }
- LLVM_DEBUG(
- dbgs() << "findPHINodesPlacement input:\nVreg: "
- << printVMP(VMP)
- << "\n";
- dbgs() << "Def Blocks: \n"; for (auto MBB
- : Defs) {
- dbgs() << "MBB_" << MBB->getNumber() << " ";
- } dbgs() << "\nLiveIn Blocks: \n";
- for (auto MBB
- : LiveInBlocks[VMP]) {
- dbgs() << "MBB_" << MBB->getNumber() << " ";
- } dbgs()
- << "\n");
-
- findPHINodesPlacement(LiveInBlocks[VMP], Defs, PHIBlocks);
- LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n"; for (auto MBB
- : PHIBlocks) {
- dbgs() << "MBB_" << MBB->getNumber() << " ";
- } dbgs() << "\n");
- for (auto MBB : PHIBlocks) {
- if (!PHINodes[MBB->getNumber()].contains(VMP)) {
- // Insert PHI for VReg. Don't use new VReg here as we'll replace them
- // in renaming phase.
- printVMP(VMP);
- auto PHINode =
- BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
- .addReg(VMP.getVReg(), RegState::Define, VMP.getSubReg(MRI, TRI));
- PHINodes[MBB->getNumber()].insert(VMP);
- PHIMap[PHINode] = VMP;
+ // 5. single clean-up
+ // LIS->shrinkToUses(&LI);
+ LI.RenumberValues();
+ }
}
}
}
- // Rename virtual registers in the basic block.
- DenseMap<unsigned, VRegDefStack> VregNames;
- renameVRegs(MF.front(), VregNames);
+ Processed.clear();
+
+ // // Collect all cross-block virtual registers.
+ // // This includes registers that are live-in to the function, and registers
+ // // that are defined in multiple blocks.
+ // // We will insert PHI nodes for these registers.
+ // collectCrossBlockVRegs(MF);
+
+ // LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
+ // for (auto VMP : CrossBlockVRegs) { dbgs() << printVMP(VMP) << " "; });
+
+ // for (auto VMP : CrossBlockVRegs) {
+ // SmallVector<MachineBasicBlock *> PHIBlocks;
+ // LiveInterval &LI = LIS->getInterval(VMP.getVReg());
+ // if (LI.hasSubRanges()) {
+ // for (const LiveInterval::SubRange &SR : LI.subranges()) {
+ // LaneBitmask Mask = SR.LaneMask;
+ // if ((Mask & VMP.getLaneMask()) == VMP.getLaneMask()) {
+ // for (auto &MBB : MF) {
+ // if (SR.liveAt(LIS->getMBBStartIdx(&MBB)))
+ // LiveInBlocks[VMP].insert(&MBB);
+ // }
+ // }
+ // }
+ // } else {
+ // for (auto &MBB : MF) {
+ // if (LI.liveAt(LIS->getMBBStartIdx(&MBB)))
+ // LiveInBlocks[VMP].insert(&MBB);
+ // }
+ // }
+
+ // SmallPtrSet<MachineBasicBlock *, 8> Defs;
+ // for(auto E : DefBlocks) {
+ // auto V = E.first;
+ // if (V.getVReg() == VMP.getVReg()) {
+ // if ((V.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask()) {
+ // Defs.insert(E.second.begin(), E.second.end());
+ // }
+ // }
+ // }
+
+ // LLVM_DEBUG(
+ // dbgs() << "findPHINodesPlacement input:\nVreg: "
+ // << printVMP(VMP)
+ // << "\n";
+ // dbgs() << "Def Blocks: \n"; for (auto MBB
+ // : Defs) {
+ // dbgs() << "MBB_" << MBB->getNumber() << " ";
+ // } dbgs() << "\nLiveIn Blocks: \n";
+ // for (auto MBB
+ // : LiveInBlocks[VMP]) {
+ // dbgs() << "MBB_" << MBB->getNumber() << " ";
+ // } dbgs()
+ // << "\n");
+
+ // findPHINodesPlacement(LiveInBlocks[VMP], Defs, PHIBlocks);
+ // LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n"; for (auto MBB
+ // : PHIBlocks) {
+ // dbgs() << "MBB_" << MBB->getNumber() << " ";
+ // } dbgs() << "\n");
+ // for (auto MBB : PHIBlocks) {
+ // if (!PHINodes[MBB->getNumber()].contains(VMP)) {
+ // // Insert PHI for VReg. Don't use new VReg here as we'll replace them
+ // // in renaming phase.
+ // printVMP(VMP);
+ // auto PHINode =
+ // BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
+ // .addReg(VMP.getVReg(), RegState::Define, VMP.getSubReg(MRI, TRI));
+ // PHINodes[MBB->getNumber()].insert(VMP);
+ // PHIMap[PHINode] = VMP;
+ // }
+ // }
+ // }
+
+ // // Rename virtual registers in the basic block.
+ // DenseMap<unsigned, VRegDefStack> VregNames;
+ // renameVRegs(MF.front(), VregNames);
MF.getProperties().set(MachineFunctionProperties::Property::IsSSA);
MF.getProperties().reset(MachineFunctionProperties::Property ::NoPHIs);
+ MF.verify();
return MRI->isSSA();
}
@@ -465,6 +751,7 @@ char AMDGPURebuildSSALegacy::ID = 0;
INITIALIZE_PASS_BEGIN(AMDGPURebuildSSALegacy, DEBUG_TYPE, "AMDGPU Rebuild SSA",
false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LiveIntervalsWrapperPass)
INITIALIZE_PASS_END(AMDGPURebuildSSALegacy, DEBUG_TYPE, "AMDGPU Rebuild SSA",
false, false)
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir b/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
index 0ac66ea32f19e..3ceb8a76cfa9c 100644
--- a/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/if_loop_with_subregs.mir
@@ -364,29 +364,29 @@ body: |
; CHECK-NEXT: successors: %bb.6(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber %61:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 68, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD, align 4, addrspace 1)
- ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], %140, implicit $exec
+ ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], %125, implicit $exec
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY %61.sub0, implicit $exec
; CHECK-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY %61.sub1, implicit $exec
- ; CHECK-NEXT: [[V_SUB_U32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], [[V_ADD_U32_e32_]], implicit $exec
+ ; CHECK-NEXT: [[V_SUB_U32_e32_:%[0-9]+]]:vgpr_32 = V_SUB_U32_e32 [[GLOBAL_LOAD_DWORD_SADDR1]], %141, implicit $exec
; CHECK-NEXT: S_BRANCH %bb.6
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.B:
; CHECK-NEXT: successors: %bb.4(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[S_LOAD_DWORD_IMM1:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 4, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLI, addrspace 1)
- ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM_ec:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 108, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD1, align 4, addrspace 1)
+ ; CHECK-NEXT: early-clobber %132:sreg_64_xexec = S_LOAD_DWORDX2_IMM_ec %0, 108, 0 :: ("amdgpu-noclobber" load (s64) from %ir.gepD1, align 4, addrspace 1)
; CHECK-NEXT: [[S_LOAD_DWORD_IMM2:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0, 1024, 0 :: ("amdgpu-noclobber" load (s32) from %ir.gepLB, addrspace 1)
; CHECK-NEXT: [[V_LSHLREV_B32_e32_2:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e32 4, [[COPY1]], implicit $exec
- ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_LOAD_DWORDX2_IMM_ec]].sub1, implicit $exec
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY %132.sub1, implicit $exec
; CHECK-NEXT: S_BRANCH %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.Flow9:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.6(0x40000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[V_LSHLREV_B32_e32_2]], %bb.5, [[DEF2]].sub0, %bb.0
- ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI %134, %bb.5, [[DEF1]], %bb.0
- ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_LOAD_DWORDX2_IMM_ec]].sub0, %bb.5, [[DEF]].sub0, %bb.0
- ; CHECK-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI %136, %bb.5, [[COPY1]], %bb.0
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI [[DEF2]].sub0, %bb.0, [[V_LSHLREV_B32_e32_2]], %bb.5
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI [[DEF1]], %bb.0, %137, %bb.5
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[DEF]].sub0, %bb.0, %132.sub0, %bb.5
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI [[COPY1]], %bb.0, %126, %bb.5
; CHECK-NEXT: [[S_OR_SAVEEXEC_B64_:%[0-9]+]]:sreg_64 = S_OR_SAVEEXEC_B64 [[S_XOR_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[PHI2]], implicit $exec
; CHECK-NEXT: [[SI_SPILL_V32_RESTORE:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
@@ -397,21 +397,21 @@ body: |
; CHECK-NEXT: bb.4.H:
; CHECK-NEXT: successors: %bb.5(0x04000000), %bb.4(0x7c000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PHI4:%[0-9]+]]:vgpr_32 = PHI [[COPY5]], %bb.2, %134, %bb.4
- ; CHECK-NEXT: [[PHI5:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_LOAD_DWORD_IMM1]], %bb.2, %135, %bb.4
- ; CHECK-NEXT: undef [[V_LSHLREV_B32_e32_3:%[0-9]+]].sub0:vreg_64 = V_LSHLREV_B32_e32 [[PHI5]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sreg_32_xm0_xexec = PHI [[S_LOAD_DWORD_IMM1]], %bb.2, %148, %bb.4
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI [[COPY5]], %bb.2, %137, %bb.4
+ ; CHECK-NEXT: undef [[V_LSHLREV_B32_e32_3:%[0-9]+]].sub0:vreg_64 = V_LSHLREV_B32_e32 [[PHI4]], [[GLOBAL_LOAD_DWORD_SADDR1]], implicit $exec
; CHECK-NEXT: [[V_ASHRREV_I32_e32_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[V_LSHLREV_B32_e32_3]].sub0, implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_]], %subreg.sub1, [[V_LSHLREV_B32_e32_3]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_LSHLREV_B32_e32_3]].sub0, %subreg.sub0, [[V_ASHRREV_I32_e32_]], %subreg.sub1
; CHECK-NEXT: [[V_LSHLREV_B64_e64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE]], implicit $exec
; CHECK-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY %0.sub1
; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_]].sub0, 0, implicit $exec
; CHECK-NEXT: SI_SPILL_V32_SAVE [[V_MUL_U32_U24_e32_]].sub0, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
; CHECK-NEXT: [[V_ADDC_U32_e64_:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY7]], [[V_LSHLREV_B64_e64_]].sub1, [[V_ADD_CO_U32_e64_1]], 0, implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_]], %subreg.sub1, [[V_ADD_CO_U32_e64_]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_]].sub0, %subreg.sub0, [[V_ADDC_U32_e64_]], %subreg.sub1
; CHECK-NEXT: [[GLOBAL_LOAD_DWORD:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[REG_SEQUENCE1]], 0, 0, implicit $exec :: (load (s32) from %ir.gepL, addrspace 1)
- ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = nsw V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD]], [[PHI4]], implicit $exec
- ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32_xm0_xexec = nsw S_ADD_I32 [[PHI5]], 1, implicit-def dead $scc
- ; CHECK-NEXT: S_CMP_LE_I32 [[S_ADD_I32_]], [[S_LOAD_DWORD_IMM2]], implicit-def $scc
+ ; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = nsw V_ADD_U32_e32 [[GLOBAL_LOAD_DWORD]], [[PHI5]], implicit $exec
+ ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32_xm0_xexec = nsw S_ADD_I32 [[PHI4]], 1, implicit-def dead $scc
+ ; CHECK-NEXT: S_CMP_LE_I32 [[PHI4]], [[S_LOAD_DWORD_IMM2]], implicit-def $scc
; CHECK-NEXT: S_CBRANCH_SCC1 %bb.4, implicit $scc
; CHECK-NEXT: S_BRANCH %bb.5
; CHECK-NEXT: {{ $}}
@@ -423,30 +423,30 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.6.E:
; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr_32 = PHI [[COPY3]], %bb.1, [[COPY6]], %bb.3
- ; CHECK-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI [[V_SUB_U32_e32_]], %bb.1, [[SI_SPILL_V32_RESTORE]], %bb.3
- ; CHECK-NEXT: [[PHI8:%[0-9]+]]:vgpr_32 = PHI [[V_ADD_U32_e32_]], %bb.1, [[PHI]], %bb.3
- ; CHECK-NEXT: [[PHI9:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.1, [[PHI1]], %bb.3
+ ; CHECK-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI [[V_ADD_U32_e32_]], %bb.1, [[PHI]], %bb.3
+ ; CHECK-NEXT: [[PHI8:%[0-9]+]]:vgpr_32 = PHI [[COPY4]], %bb.1, [[PHI1]], %bb.3
+ ; CHECK-NEXT: [[PHI9:%[0-9]+]]:vgpr_32 = PHI [[V_SUB_U32_e32_]], %bb.1, [[SI_SPILL_V32_RESTORE]], %bb.3
; CHECK-NEXT: $exec = S_OR_B64 $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $scc
- ; CHECK-NEXT: [[V_ASHRREV_I32_e32_1:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI8]], implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_1]], %subreg.sub1, [[PHI8]], %subreg.sub0
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e32_1:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI7]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[PHI7]], %subreg.sub0, [[V_ASHRREV_I32_e32_1]], %subreg.sub1
; CHECK-NEXT: [[V_LSHLREV_B64_e64_1:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE2]], implicit $exec
; CHECK-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY %0.sub1
; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_2:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_3:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_1]].sub0, 0, implicit $exec
- ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[PHI9]], %stack.1, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.1, addrspace 5)
+ ; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[PHI8]], %stack.1, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.1, addrspace 5)
; CHECK-NEXT: [[V_ADDC_U32_e64_2:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_3:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY8]], [[V_LSHLREV_B64_e64_1]].sub1, [[V_ADD_CO_U32_e64_3]], 0, implicit $exec
- ; CHECK-NEXT: [[V_ASHRREV_I32_e32_2:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI7]], implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ASHRREV_I32_e32_2]], %subreg.sub1, [[PHI7]], %subreg.sub0
+ ; CHECK-NEXT: [[V_ASHRREV_I32_e32_2:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e32 31, [[PHI9]], implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[PHI9]], %subreg.sub0, [[V_ASHRREV_I32_e32_2]], %subreg.sub1
; CHECK-NEXT: [[V_LSHLREV_B64_e64_2:%[0-9]+]]:vreg_64 = V_LSHLREV_B64_e64 2, [[REG_SEQUENCE3]], implicit $exec
; CHECK-NEXT: undef [[V_ADD_CO_U32_e64_4:%[0-9]+]].sub0:vreg_64, [[V_ADD_CO_U32_e64_5:%[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64 %0.sub0, [[V_LSHLREV_B64_e64_2]].sub0, 0, implicit $exec
; CHECK-NEXT: SI_SPILL_V32_SAVE killed [[GLOBAL_LOAD_DWORD_SADDR]], %stack.2, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.2, addrspace 5)
; CHECK-NEXT: [[V_ADDC_U32_e64_4:%[0-9]+]]:vgpr_32, dead [[V_ADDC_U32_e64_5:%[0-9]+]]:sreg_64_xexec = V_ADDC_U32_e64 [[COPY8]], [[V_LSHLREV_B64_e64_2]].sub1, [[V_ADD_CO_U32_e64_5]], 0, implicit $exec
; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[GLOBAL_LOAD_DWORD_SADDR1]], [[PHI6]], implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_2]], %subreg.sub1, [[V_ADD_CO_U32_e64_2]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_2]].sub0, %subreg.sub0, [[V_ADDC_U32_e64_2]], %subreg.sub1
; CHECK-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE4]], [[V_MUL_LO_U32_e64_]], 0, 0, implicit $exec :: (store (s32) into %ir.gepRX, addrspace 1)
; CHECK-NEXT: [[SI_SPILL_V32_RESTORE1:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.2, addrspace 5)
; CHECK-NEXT: [[SI_SPILL_V32_RESTORE2:%[0-9]+]]:vgpr_32 = SI_SPILL_V32_RESTORE %stack.1, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.1, addrspace 5)
; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[SI_SPILL_V32_RESTORE1]], [[SI_SPILL_V32_RESTORE2]], implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADDC_U32_e64_4]], %subreg.sub1, [[V_ADD_CO_U32_e64_4]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD_CO_U32_e64_4]].sub0, %subreg.sub0, [[V_ADDC_U32_e64_4]], %subreg.sub1
; CHECK-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE5]], [[V_MUL_LO_U32_e64_1]], 0, 0, implicit $exec :: (store (s32) into %ir.gepRY, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0.S:
diff --git a/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir b/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
index ee73e0dcfa377..d12242a6c4eaa 100644
--- a/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
+++ b/llvm/test/CodeGen/AMDGPU/SSARA/test_rebuild_ssa_subregs.mir
@@ -267,22 +267,16 @@ body: |
; CHECK-NEXT: bb.2.Flow:
; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.3(0x40000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PHI:%[0-9]+]]:vgpr_32 = PHI %62.sub3, %bb.6, [[DEF]].sub3, %bb.1
- ; CHECK-NEXT: [[PHI1:%[0-9]+]]:vgpr_32 = PHI %62.sub0, %bb.6, [[DEF]].sub0, %bb.1
- ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vgpr_32 = PHI %63, %bb.6, [[DEF]].sub2, %bb.1
- ; CHECK-NEXT: [[PHI3:%[0-9]+]]:vgpr_32 = PHI %62.sub1, %bb.6, [[DEF]].sub1, %bb.1
- ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sreg_64_xexec = PHI %61, %bb.6, [[S_MOV_B64_]], %bb.1
- ; CHECK-NEXT: $vcc = S_ANDN2_B64 $exec, [[PHI4]], implicit-def dead $scc
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:vreg_128 = PHI [[DEF]], %bb.1, %71, %bb.6
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:sreg_64_xexec = PHI [[S_MOV_B64_]], %bb.1, %61, %bb.6
+ ; CHECK-NEXT: $vcc = S_ANDN2_B64 $exec, [[PHI1]], implicit-def dead $scc
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.4
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.Flow3:
; CHECK-NEXT: successors: %bb.5(0x80000000)
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[PHI5:%[0-9]+]]:vgpr_32 = PHI %72.sub3, %bb.4, [[PHI]], %bb.2
- ; CHECK-NEXT: [[PHI6:%[0-9]+]]:vgpr_32 = PHI %72.sub0, %bb.4, [[PHI1]], %bb.2
- ; CHECK-NEXT: [[PHI7:%[0-9]+]]:vgpr_32 = PHI %72.sub2, %bb.4, [[PHI2]], %bb.2
- ; CHECK-NEXT: [[PHI8:%[0-9]+]]:vgpr_32 = PHI %72.sub1, %bb.4, [[PHI3]], %bb.2
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vreg_128 = PHI [[PHI]], %bb.2, %72, %bb.4
; CHECK-NEXT: S_BRANCH %bb.5
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.4.bb2:
@@ -291,17 +285,17 @@ body: |
; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[S_LOAD_DWORD_IMM]], [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0, implicit $exec
; CHECK-NEXT: [[V_ADD_U32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[GLOBAL_LOAD_DWORDX4_SADDR]].sub1, [[GLOBAL_LOAD_DWORDX4_SADDR]].sub0, implicit $exec
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e32_]]
- ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[V_ADD_U32_e32_1]], %subreg.sub1, [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3, %subreg.sub2_sub3
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[GLOBAL_LOAD_DWORDX4_SADDR]].sub2_sub3, %subreg.sub2_sub3, [[V_ADD_U32_e32_1]], %subreg.sub1, [[COPY2]], %subreg.sub0
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
; CHECK-NEXT: S_BRANCH %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.5.bb3:
- ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI8]], [[PHI7]], implicit $exec
- ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI6]], [[PHI5]], implicit $exec
- ; CHECK-NEXT: [[V_MAD_U64_U32_e64_:%[0-9]+]]:vreg_64, dead [[V_MAD_U64_U32_e64_1:%[0-9]+]]:sreg_64 = V_MAD_U64_U32_e64 [[PHI6]], [[PHI7]], 0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI2]].sub1, [[PHI2]].sub2, implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[PHI2]].sub0, [[PHI2]].sub3, implicit $exec
+ ; CHECK-NEXT: [[V_MAD_U64_U32_e64_:%[0-9]+]]:vreg_64, dead [[V_MAD_U64_U32_e64_1:%[0-9]+]]:sreg_64 = V_MAD_U64_U32_e64 [[PHI2]].sub0, [[PHI2]].sub2, 0, 0, implicit $exec
; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: [[V_ADD3_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD3_U32_e64 [[V_MAD_U64_U32_e64_]].sub1, [[V_MUL_LO_U32_e64_1]], [[V_MUL_LO_U32_e64_]], implicit $exec
- ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_ADD3_U32_e64_]], %subreg.sub1, [[V_MAD_U64_U32_e64_]].sub0, %subreg.sub0
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[V_MAD_U64_U32_e64_]].sub0, %subreg.sub0, [[V_ADD3_U32_e64_]], %subreg.sub1
; CHECK-NEXT: GLOBAL_STORE_DWORDX2_SADDR [[V_MOV_B32_e32_]], [[REG_SEQUENCE1]], %0.sub2_sub3, 176, 0, implicit $exec :: (store (s64) into %ir.gep2, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
; CHECK-NEXT: {{ $}}
@@ -312,6 +306,7 @@ body: |
; CHECK-NEXT: [[COPY4:%[0-9]+]]:vreg_128 = COPY [[GLOBAL_LOAD_DWORDX4_SADDR]]
; CHECK-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[V_ADD_U32_e32_2]]
; CHECK-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 0
+ ; CHECK-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]].sub0, %subreg.sub0, [[COPY4]].sub1, %subreg.sub1, [[COPY4]].sub3, %subreg.sub3, [[COPY5]], %subreg.sub2
; CHECK-NEXT: S_BRANCH %bb.2
bb.0.bb0:
successors: %bb.6(0x40000000), %bb.1(0x40000000)
>From a76c3e30593327e3c394b8e5994e940ae7ea855b Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Mon, 11 Aug 2025 16:45:15 +0000
Subject: [PATCH 44/46] Rebuild SSA: Re-implemented to use the LiveIntervals.
Refactored and cleaned up.
---
llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp | 951 ++++++++------------
1 file changed, 362 insertions(+), 589 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
index ef6cc1d38253b..b426e6b530e48 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURebuildSSA.cpp
@@ -27,306 +27,143 @@ using namespace llvm;
namespace {
class AMDGPURebuildSSALegacy : public MachineFunctionPass {
- LiveIntervals *LIS;
- MachineDominatorTree *MDT;
- const SIInstrInfo *TII;
- const SIRegisterInfo *TRI;
- MachineRegisterInfo *MRI;
- MachineLoopInfo *MLI;
-
+ LiveIntervals *LIS = nullptr;
+ MachineDominatorTree *MDT = nullptr;
+ const SIInstrInfo *TII = nullptr;
+ const SIRegisterInfo *TRI = nullptr;
+ MachineRegisterInfo *MRI = nullptr;
+ MachineLoopInfo *MLI = nullptr;
+
+  // Optional scratch; currently unused but kept for parity with the existing header.
DenseMap<MachineOperand *, std::pair<MachineInstr *, LaneBitmask>>
RegSeqences;
- void buildRealPHI(VNInfo *VNI, LiveInterval &LI,
- Register OldVR);
- void splitNonPhiValue(VNInfo *VNI,
- LiveInterval &LI, Register OldVR);
- void rewriteUses(MachineInstr *DefMI, Register OldVR,
- LaneBitmask MaskToRewrite, Register NewVR, LiveInterval &LI,
- VNInfo *VNI);
-
- typedef struct {
- Register CurName;
- LaneBitmask PrevMask;
- unsigned PrevSubRegIdx;
- MachineInstr *DefMI;
- } CurVRegInfo;
+ //===--------------------------------------------------------------------===//
+  // Internal helpers
+ //===--------------------------------------------------------------------===//
+
+ /// Return the VNInfo reaching this PHI operand along its predecessor edge.
+ VNInfo *incomingOnEdge(LiveInterval &LI, MachineInstr *Phi,
+ MachineOperand &PhiOp) {
+ unsigned OpIdx = Phi->getOperandNo(&PhiOp);
+ MachineBasicBlock *Pred = Phi->getOperand(OpIdx + 1).getMBB();
+ SlotIndex EndB = LIS->getMBBEndIdx(Pred);
+ return LI.getVNInfoBefore(EndB);
+ }
- using VRegDefStack = std::vector<CurVRegInfo>;
+ /// True if \p UseMI’s operand is reached by \p VNI (PHIs, same-block order,
+ /// cross-block dominance).
+ bool reachedByThisVNI(LiveInterval &LI, MachineInstr *DefMI,
+ MachineInstr *UseMI, MachineOperand &UseOp,
+ VNInfo *VNI) {
+ if (UseMI->isPHI())
+ return incomingOnEdge(LI, UseMI, UseOp) == VNI;
-#ifndef NDEBUG
- void printVRegDefStack(VRegDefStack VregDefs) {
- VRegDefStack::reverse_iterator It = VregDefs.rbegin();
- dbgs() << "\n####################################\n";
- for (; It != VregDefs.rend(); ++It) {
- CurVRegInfo VRInfo = *It;
- dbgs() << printReg(VRInfo.CurName, TRI, VRInfo.PrevSubRegIdx) << "\n";
- MachineInstr *DefMI = VRInfo.DefMI;
- dbgs() << "DefMI: " << *DefMI << "\n";
- LaneBitmask DefMask = VRInfo.PrevMask;
- dbgs() << "Def mask : " << PrintLaneMask(DefMask) << "\n";
+ if (UseMI->getParent() == DefMI->getParent()) {
+ SlotIndex DefIdx = LIS->getInstructionIndex(*DefMI);
+ SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);
+ return DefIdx < UseIdx; // strict within-block order
}
- dbgs() << "####################################\n";
+ return MDT->dominates(DefMI->getParent(), UseMI->getParent());
}
-#endif
- SetVector<VRegMaskPair> CrossBlockVRegs;
- DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> DefBlocks;
- DenseMap<VRegMaskPair, SmallPtrSet<MachineBasicBlock *, 8>> LiveInBlocks;
- DenseMap<unsigned, SetVector<VRegMaskPair>> PHINodes;
- DenseMap<MachineInstr *, VRegMaskPair> PHIMap;
- DenseSet<unsigned> DefSeen;
- DenseSet<unsigned> Renamed;
- DenseSet<unsigned> Visited;
-
- void collectCrossBlockVRegs(MachineFunction &MF);
- void findPHINodesPlacement(const SmallPtrSetImpl<MachineBasicBlock *> &LiveInBlocks,
- const SmallPtrSetImpl<MachineBasicBlock *> &DefBlocks,
- SmallVectorImpl<MachineBasicBlock *> &PHIBlocks) {
-
- IDFCalculatorBase<MachineBasicBlock, false> IDF(MDT->getBase());
-
- IDF.setLiveInBlocks(LiveInBlocks);
- IDF.setDefiningBlocks(DefBlocks);
- IDF.calculate(PHIBlocks);
+ /// What lanes does this operand read?
+ LaneBitmask operandLaneMask(const MachineOperand &MO) const {
+ if (unsigned Sub = MO.getSubReg())
+ return TRI->getSubRegIndexLaneMask(Sub);
+ return MRI->getMaxLaneMaskForVReg(MO.getReg());
}
- MachineOperand &rewriteUse(MachineOperand &Op, MachineBasicBlock::iterator I,
- MachineBasicBlock &MBB,
- DenseMap<unsigned, VRegDefStack> VregNames) {
- // Sub-reg handling:
- // 1. if (UseMask & ~DefMask) != 0 : current Def does not define all used
- // lanes. We should search names stack for the Def that defines missed
- // lanes to construct the REG_SEQUENCE
- // 2. if (UseMask & DefMask) == 0 : current Def defines subregisters of a
- // register which are not used by the current Use. We should search names
- // stack for the corresponding sub-register def. Replace reg.subreg in Use
- // only if VReg.subreg found != current VReg.subreg in use!
- // 3. (UseMask & DefMask) == UseMask just replace the reg if the reg found
- // != current reg in Use. Take care of the subreg in Use. If (DefMask |
- // UseMask) != UseMask, i.e. current Def defines more lanes that is used
- // by the current Use, we need to calculate the corresponding subreg index
- // for the Use. DefinedLanes serves as a result of the expression
- // mentioned above. UndefSubRegs initially is set to UseMask but is
- // updated on each iteration if we are looking for the sub-regs
- // definitions to compose REG_SEQUENCE.
- bool RewriteOp = true;
- unsigned VReg = Op.getReg();
- assert(!VregNames[VReg].empty() &&
- "Error: use does not dominated by definition!\n");
- SmallVector<std::tuple<unsigned, unsigned, unsigned>> RegSeqOps;
- LaneBitmask UseMask = getOperandLaneMask(Op, TRI, MRI);
- LLVM_DEBUG(dbgs() << "Use mask : " << PrintLaneMask(UseMask)
- << "\nLooking for appropriate definiton...\n");
- LaneBitmask UndefSubRegs = UseMask;
- LaneBitmask DefinedLanes = LaneBitmask::getNone();
- unsigned SubRegIdx = AMDGPU::NoRegister;
- Register CurVReg = AMDGPU::NoRegister;
- VRegDefStack VregDefs = VregNames[VReg];
- VRegDefStack::reverse_iterator It = VregDefs.rbegin();
- for (; It != VregDefs.rend(); ++It) {
- CurVRegInfo VRInfo = *It;
- CurVReg = VRInfo.CurName;
- MachineInstr *DefMI = VRInfo.DefMI;
- MachineOperand *DefOp = DefMI->findRegisterDefOperand(CurVReg, TRI);
- const TargetRegisterClass *RC =
- TRI->getRegClassForOperandReg(*MRI, *DefOp);
- LaneBitmask DefMask = VRInfo.PrevMask;
- LaneBitmask LanesDefinedyCurrentDef = (UndefSubRegs & DefMask) & UseMask;
- DefinedLanes |= LanesDefinedyCurrentDef;
- LLVM_DEBUG(dbgs() << "Def:\nDefMI: " << *DefMI << "\nOperand : " << *DefOp
- << "\nDef mask : " << PrintLaneMask(DefMask)
- << "\nLanes defined by current Def: "
- << PrintLaneMask(LanesDefinedyCurrentDef)
- << "\nTotal defined lanes: " << PrintLaneMask(DefinedLanes)
- << "\n");
-
- if (LanesDefinedyCurrentDef == UseMask) {
- // All lanes used here are defined by this def.
- if (CurVReg == VReg && Op.getSubReg() == DefOp->getSubReg()) {
- // Need nothing - bail out.
- RewriteOp = false;
- break;
- }
- SubRegIdx = DefOp->getSubReg();
- if ((DefMask & ~UseMask).any()) {
- // Definition defines more lanes then used. Need sub register
- // index;
- SubRegIdx = getSubRegIndexForLaneMask(UseMask, TRI);
- }
- break;
- }
-
- if (LanesDefinedyCurrentDef.any()) {
- // Current definition defines some of the lanes used here.
- unsigned DstSubReg =
- getSubRegIndexForLaneMask(LanesDefinedyCurrentDef, TRI);
- if (!DstSubReg) {
- SmallVector<unsigned> Idxs =
- getCoveringSubRegsForLaneMask(LanesDefinedyCurrentDef, RC, TRI);
- for (unsigned SubIdx : Idxs) {
- LLVM_DEBUG(dbgs() << "Matching subreg: " << SubIdx << " : "
- << PrintLaneMask(TRI->getSubRegIndexLaneMask(SubIdx))
- << "\n");
- RegSeqOps.push_back({CurVReg, SubIdx, SubIdx});
- }
- } else {
- unsigned SrcSubReg = (DefMask & ~LanesDefinedyCurrentDef).any()
- ? DstSubReg
- : DefOp->getSubReg();
- RegSeqOps.push_back({CurVReg, SrcSubReg, DstSubReg});
- }
- UndefSubRegs = UseMask & ~DefinedLanes;
- LLVM_DEBUG(dbgs() << "UndefSubRegs: " << PrintLaneMask(UndefSubRegs) << "\n");
- if (UndefSubRegs.none())
- break;
- } else {
- // The current definition does not define any of the lanes used
- // here. Continue to search for the definition.
- LLVM_DEBUG(dbgs() << "No lanes defined by this def!\n");
- continue;
- }
+ /// Build a REG_SEQUENCE to materialize a super-reg/mixed-lane use.
+ /// Inserts at the PHI predecessor terminator (for PHI uses) or right before
+ /// UseMI otherwise. Returns the new full-width vreg, the RS index via OutIdx,
+ /// and the subrange lane masks that should be extended to that point.
+ Register buildRSForSuperUse(MachineInstr *UseMI, MachineOperand &MO,
+ Register OldVR, Register NewVR,
+ LaneBitmask MaskToRewrite, LiveInterval &LI,
+ const TargetRegisterClass *OpRC,
+ SlotIndex &OutIdx,
+ SmallVectorImpl<LaneBitmask> &LanesToExtend) {
+ MachineBasicBlock *InsertBB = UseMI->getParent();
+ MachineBasicBlock::iterator IP(UseMI);
+ SlotIndex QueryIdx;
+
+ if (UseMI->isPHI()) {
+ unsigned OpIdx = UseMI->getOperandNo(&MO);
+ MachineBasicBlock *Pred = UseMI->getOperand(OpIdx + 1).getMBB();
+ InsertBB = Pred;
+ IP = Pred->getFirstTerminator(); // ok if == end()
+ QueryIdx = LIS->getMBBEndIdx(Pred).getPrevSlot();
+ } else {
+ QueryIdx = LIS->getInstructionIndex(*UseMI);
}
- if (UndefSubRegs != UseMask && !UndefSubRegs.none()) {
- // WE haven't found all sub-regs definition. Assume undef.
- // Insert IMPLISIT_DEF
-
- const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, Op);
- SmallVector<unsigned> Idxs =
- getCoveringSubRegsForLaneMask(UndefSubRegs, RC, TRI);
- for (unsigned SubIdx : Idxs) {
- const TargetRegisterClass *SubRC = TRI->getSubRegisterClass(RC, SubIdx);
- Register NewVReg = MRI->createVirtualRegister(SubRC);
- BuildMI(MBB, I, I->getDebugLoc(), TII->get(AMDGPU::IMPLICIT_DEF))
- .addReg(NewVReg, RegState::Define);
- RegSeqOps.push_back({NewVReg, AMDGPU::NoRegister, SubIdx});
- }
- }
+ Register Dest = MRI->createVirtualRegister(OpRC);
+ auto RS = BuildMI(*InsertBB, IP,
+ (IP != InsertBB->end() ? IP->getDebugLoc() : DebugLoc()),
+ TII->get(TargetOpcode::REG_SEQUENCE), Dest);
- if (!RegSeqOps.empty()) {
- // All subreg defs are found. Insert REG_SEQUENCE.
- auto *RC = TRI->getRegClassForReg(*MRI, VReg);
- CurVReg = MRI->createVirtualRegister(RC);
- auto RS = BuildMI(MBB, I, I->getDebugLoc(),
- TII->get(AMDGPU::REG_SEQUENCE), CurVReg);
- for (auto O : RegSeqOps) {
- auto [R, SrcSubreg, DstSubreg] = O;
- RS.addReg(R, 0, SrcSubreg);
- RS.addImm(DstSubreg);
- }
+ SmallDenseSet<unsigned, 8> AddedSubIdxs;
+ SmallDenseSet<LaneBitmask::Type, 8> AddedMasks;
- VregNames[VReg].push_back({CurVReg, MRI->getMaxLaneMaskForVReg(CurVReg),
- AMDGPU::NoRegister, RS});
- }
+ for (const LiveInterval::SubRange &SR : LI.subranges()) {
+ if (!SR.getVNInfoAt(QueryIdx))
+ continue;
+ LaneBitmask Lane = SR.LaneMask;
+ if (!AddedMasks.insert(Lane.getAsInteger()).second)
+ continue;
- assert(CurVReg != AMDGPU::NoRegister &&
- "Use is not dominated by definition!\n");
+ unsigned SubIdx = getSubRegIndexForLaneMask(Lane, TRI);
+ if (!SubIdx || !AddedSubIdxs.insert(SubIdx).second)
+ continue;
- if (RewriteOp) {
- LLVM_DEBUG(dbgs() << "Rewriting use: " << Op << " to "
- << printReg(CurVReg, TRI, SubRegIdx, MRI) << "\n");
- Op.setReg(CurVReg);
- Op.setSubReg(SubRegIdx);
- }
- return Op;
- }
+ if (Lane == MaskToRewrite)
+ RS.addReg(NewVR).addImm(SubIdx);
+ else
+ RS.addReg(OldVR, 0, SubIdx).addImm(SubIdx);
- void renameVRegs(MachineBasicBlock &MBB,
- DenseMap<unsigned, VRegDefStack> VregNames) {
- if (Visited.contains(MBB.getNumber()))
- return;
-
- for (auto &PHI : MBB.phis()) {
- MachineOperand &Op = PHI.getOperand(0);
- Register Res = Op.getReg();
- unsigned SubRegIdx = Op.getSubReg();
- const TargetRegisterClass *RC =
- SubRegIdx ? TRI->getSubRegisterClass(
- TRI->getRegClassForReg(*MRI, Res), SubRegIdx)
- : TRI->getRegClassForReg(*MRI, Res);
- Register NewVReg = MRI->createVirtualRegister(RC);
- Op.setReg(NewVReg);
- Op.setSubReg(AMDGPU::NoRegister);
- VregNames[Res].push_back({NewVReg,
- SubRegIdx == AMDGPU::NoRegister
- ? MRI->getMaxLaneMaskForVReg(Res)
- : TRI->getSubRegIndexLaneMask(SubRegIdx),
- AMDGPU::NoRegister, &PHI});
- LLVM_DEBUG(dbgs() << "\nNames stack:\n";printVRegDefStack(VregNames[Res]));
- DefSeen.insert(NewVReg);
- Renamed.insert(Res);
+ LanesToExtend.push_back(Lane);
}
- for (auto &I : make_range(MBB.getFirstNonPHI(), MBB.end())) {
-
-
- for (auto &Op : I.uses()) {
- if (Op.isReg() && Op.getReg().isVirtual() &&
- Renamed.contains(Op.getReg())) {
- Op = rewriteUse(Op, I, MBB, VregNames);
- }
- }
- for (auto &Op : I.defs()) {
- if (Op.getReg().isVirtual()) {
- unsigned VReg = Op.getReg();
- if (DefSeen.contains(VReg)) {
- const TargetRegisterClass *RC =
- TRI->getRegClassForOperandReg(*MRI, Op);
- Register NewVReg = MRI->createVirtualRegister(RC);
- VregNames[VReg].push_back({NewVReg,
- getOperandLaneMask(Op, TRI, MRI),
- Op.getSubReg(), &I});
- LLVM_DEBUG(dbgs() << "\nNames stack:\n";
- printVRegDefStack(VregNames[VReg]));
-
- Op.ChangeToRegister(NewVReg, true, false, false, false, false);
- Op.setSubReg(AMDGPU::NoRegister);
- LLVM_DEBUG(dbgs()
- << "Renaming VReg: " << Register::virtReg2Index(VReg)
- << " to " << Register::virtReg2Index(NewVReg) << "\n");
- Renamed.insert(VReg);
- } else {
- VregNames[VReg].push_back(
- {VReg, getOperandLaneMask(Op, TRI, MRI), Op.getSubReg(), &I});
- LLVM_DEBUG(dbgs() << "\nNames stack:\n";
- printVRegDefStack(VregNames[VReg]));
-
- DefSeen.insert(VReg);
- }
- }
- }
+ // Fallback: ensure at least the rewritten lane appears.
+ if (AddedSubIdxs.empty()) {
+ unsigned SubIdx = getSubRegIndexForLaneMask(MaskToRewrite, TRI);
+ RS.addReg(NewVR).addImm(SubIdx);
+ LanesToExtend.push_back(MaskToRewrite);
}
- Visited.insert(MBB.getNumber());
-
- for (auto Succ : successors(&MBB)) {
- for (auto &PHI : Succ->phis()) {
- VRegMaskPair VMP = PHIMap[&PHI];
-
- unsigned SubRegIdx = VMP.getSubReg(MRI, TRI);
- if (VregNames[VMP.getVReg()].empty()) {
- PHI.addOperand(MachineOperand::CreateReg(VMP.getVReg(), false, false,
- false, false, false, false,
- SubRegIdx));
- } else {
- MachineOperand Op =
- MachineOperand::CreateReg(VMP.getVReg(), false, false, false,
- false, false, false, SubRegIdx);
- MachineBasicBlock::iterator IP = MBB.getFirstTerminator();
- Op = rewriteUse(Op, IP, MBB, VregNames);
- PHI.addOperand(Op);
- }
- PHI.addOperand(MachineOperand::CreateMBB(&MBB));
- }
- renameVRegs(*Succ, VregNames);
- }
+ LIS->InsertMachineInstrInMaps(*RS);
+ OutIdx = LIS->getInstructionIndex(*RS);
+
+#ifndef NDEBUG
+ LLVM_DEBUG({
+ dbgs() << " [RS] inserted ";
+ RS->print(dbgs());
+ });
+#endif
+ return Dest;
}
- Printable printVMP(VRegMaskPair VMP) {
- return printReg(VMP.getVReg(), TRI, VMP.getSubReg(MRI, TRI));
+ /// Extend LI (and only the specified subranges) at Idx.
+ void extendAt(LiveInterval &LI, SlotIndex Idx, ArrayRef<LaneBitmask> Lanes) {
+ SmallVector<SlotIndex, 1> P{Idx};
+ LIS->extendToIndices(LI, P);
+ for (auto &SR : LI.subranges())
+ for (LaneBitmask L : Lanes)
+ if (SR.LaneMask == L)
+ LIS->extendToIndices(SR, P);
}
+ //===--------------------------------------------------------------------===//
+ // Public interface
+ //===--------------------------------------------------------------------===//
+
+ void buildRealPHI(VNInfo *VNI, LiveInterval &LI, Register OldVR);
+ void splitNonPhiValue(VNInfo *VNI, LiveInterval &LI, Register OldVR);
+ void rewriteUses(MachineInstr *DefMI, Register OldVR,
+ LaneBitmask MaskToRewrite, Register NewVR, LiveInterval &LI,
+ VNInfo *VNI);
+
public:
static char ID;
AMDGPURebuildSSALegacy() : MachineFunctionPass(ID) {
@@ -342,235 +179,239 @@ class AMDGPURebuildSSALegacy : public MachineFunctionPass {
AU.addRequired<LiveIntervalsWrapperPass>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- };
+};
} // end anonymous namespace
-void AMDGPURebuildSSALegacy::collectCrossBlockVRegs(MachineFunction &MF) {
- for (auto &MBB : MF) {
- SetVector<VRegMaskPair> Killed;
- SetVector<VRegMaskPair> Defined;
- for (auto &I : MBB) {
- for (auto Op : I.uses()) {
- if (Op.isReg() && Op.getReg().isVirtual()) {
- VRegMaskPair VMP(Op, TRI, MRI);
- if (!Killed.contains(VMP))
- for (auto V : Defined) {
- if (V.getVReg() == VMP.getVReg()) {
- if ((V.getLaneMask() & VMP.getLaneMask()) ==
- VMP.getLaneMask()) {
- Killed.insert(VMP);
- break;
- }
- }
- }
- if (!Killed.contains(VMP))
- CrossBlockVRegs.insert(VMP);
- }
- }
- for (auto Op : I.defs()) {
- if (Op.isReg() && Op.getReg().isVirtual()) {
- VRegMaskPair VMP(Op, TRI, MRI);
- Defined.insert(VMP);
- DefBlocks[VMP].insert(&MBB);
- }
- }
- }
- }
-}
+//===----------------------------------------------------------------------===//
+// buildRealPHI
+// Create a whole- or sub-reg PHI for VNI at its block begin, then rewrite
+// dominated uses to the PHI result. We require a *uniform* lane mask across
+// all predecessors; if none found we treat it as full-width.
+//===----------------------------------------------------------------------===//
void AMDGPURebuildSSALegacy::buildRealPHI(VNInfo *VNI, LiveInterval &LI,
Register OldVR) {
MachineBasicBlock *DefMBB = LIS->getMBBFromIndex(VNI->def);
SmallVector<MachineOperand> Ops;
- LaneBitmask CurrMask = LaneBitmask::getNone();
- LaneBitmask PredMask = LaneBitmask::getNone();
- LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(OldVR);
- unsigned SubRegIdx = AMDGPU::NoRegister;
- dbgs() << "\nBuild PHI for register: " << printReg(OldVR) << "\n";
- for (auto Pred : DefMBB->predecessors()) {
- dbgs() << "Pred: MBB_" << Pred->getNumber() << "\n";
- SlotIndex LastPredIdx = LIS->getMBBEndIdx(Pred);
+ const LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(OldVR);
- for (const LiveInterval::SubRange &SR : LI.subranges()) {
- // Does this sub-range contain *any* segment that refers to V ?
- if (auto V = SR.getVNInfoBefore(LastPredIdx)) {
- PredMask |= SR.LaneMask; // this lane mask is live-out of Pred
- dbgs() << "Mask : " << PrintLaneMask(SR.LaneMask) << " VNINfo: " << V
- << " id: " << V->id << "Def: " << V->def << "\n";
- }
- }
+ LaneBitmask CommonMask = LaneBitmask::getAll(); // intersection across preds
+ LaneBitmask UnionMask = LaneBitmask::getNone();
- if (!PredMask.none() && (FullMask & ~PredMask).any()) {
- // Not all lanes are merged here
- dbgs() << "Partial register merge\n";
- dbgs() << "PredMask: " << PrintLaneMask(PredMask) << "\n";
- SubRegIdx = getSubRegIndexForLaneMask(PredMask, TRI);
- } else {
- // Full register merge
- dbgs() << "Full register merge\n";
- if (PredMask.none()) {
- dbgs() << "No sub-ranges\n";
- } else {
- dbgs() << "All sub-ranges are merging. PredMask: "
- << PrintLaneMask(PredMask) << "\n";
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "\n[PHI] Build PHI for " << printReg(OldVR) << " at MBB_"
+ << DefMBB->getNumber() << '\n');
+#endif
+
+ for (auto *Pred : DefMBB->predecessors()) {
+ SlotIndex EndB = LIS->getMBBEndIdx(Pred);
+ LaneBitmask EdgeMask = LaneBitmask::getNone();
+
+ for (const LiveInterval::SubRange &SR : LI.subranges())
+ if (SR.getVNInfoBefore(EndB))
+ EdgeMask |= SR.LaneMask;
+
+#ifndef NDEBUG
+ const bool HasSubranges = !LI.subranges().empty();
+ VNInfo *MainOut = LI.getVNInfoBefore(EndB); // whole-reg live-out?
+ LLVM_DEBUG({
+ dbgs() << " subranges: " << (HasSubranges ? "yes" : "no")
+ << ", main-range live-out: " << (MainOut ? "yes" : "no") << '\n';
+ });
+#endif
+
+ if (EdgeMask.none()) {
+#ifndef NDEBUG
+ LLVM_DEBUG({
+ dbgs() << " EdgeMask is NONE; reason: ";
+ if (LI.subranges().empty())
+ dbgs() << "no subranges for this vreg";
+ else if (LI.getVNInfoBefore(EndB))
+ dbgs() << "subranges exist but none live at edge; main-range is "
+ "live-out";
+ else
+ dbgs() << "subranges exist and main-range not live-out (treating as "
+ "undef edge)";
+ dbgs() << "\n";
+ });
+#endif
+
+ // Current policy: treat “no subrange info” or “main-range live-out” as
+ // full.
+ if (LI.subranges().empty() || LI.getVNInfoBefore(EndB))
+ EdgeMask = FullMask;
+ else {
+        // TODO: optionally make the PHI operand explicitly undef on this
+        // edge: keep EdgeMask == NONE and later add OldVR with
+        // RegState::Undef, or insert an IMPLICIT_DEF.
}
}
- assert(CurrMask.none() || (CurrMask == PredMask));
- CurrMask = PredMask;
+ CommonMask &= EdgeMask;
+ UnionMask |= EdgeMask;
+
+ unsigned SubIdx = AMDGPU::NoRegister;
+ if ((FullMask & ~EdgeMask).any()) // partial register incoming
+ SubIdx = getSubRegIndexForLaneMask(EdgeMask, TRI);
- Ops.push_back(
- MachineOperand::CreateReg(OldVR, 0, 0, 0, 0, 0, 0, SubRegIdx));
+ Ops.push_back(MachineOperand::CreateReg(OldVR, /*isDef*/ false,
+ /*isImp*/ false, /*isKill*/ false,
+ /*isDead*/ false, /*isUndef*/ false,
+ /*isEarlyClobber*/ false, SubIdx));
Ops.push_back(MachineOperand::CreateMBB(Pred));
}
+ // Decide the lanes this PHI represents. If preds disagree, conservatively
+ // use the union; otherwise the intersection equals the union.
+ LaneBitmask PhiMask = (CommonMask.none() ? UnionMask : CommonMask);
+ if (PhiMask.none())
+ PhiMask = FullMask;
+
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << " [PHI] final mask=" << PrintLaneMask(PhiMask) << '\n');
+#endif
+
const TargetRegisterClass *RC =
TRI->getRegClassForOperandReg(*MRI, Ops.front());
-
- Register DestReg =
- MRI->createVirtualRegister(RC);
-
+ Register DestReg = MRI->createVirtualRegister(RC);
+
auto PHINode = BuildMI(*DefMBB, DefMBB->begin(), DebugLoc(),
TII->get(TargetOpcode::PHI), DestReg)
.add(ArrayRef(Ops));
-
MachineInstr *PHI = PHINode.getInstr();
LIS->InsertMachineInstrInMaps(*PHI);
- rewriteUses(PHI, OldVR, CurrMask.none() ? FullMask : CurrMask, DestReg, LI,
- VNI);
+#ifndef NDEBUG
+ LLVM_DEBUG({
+ dbgs() << " [PHI] inserted ";
+ PHI->print(dbgs());
+ });
+#endif
+
+ // Rewrite dominated uses to the PHI’s value.
+ rewriteUses(PHI, OldVR, PhiMask, DestReg, LI, VNI);
LIS->createAndComputeVirtRegInterval(DestReg);
}
+//===----------------------------------------------------------------------===//
+// splitNonPhiValue
+// Turn a (non-PHI) value number into a new vreg definition, then rewrite
+// dominated uses of the affected lanes to that new vreg.
+//===----------------------------------------------------------------------===//
+
void AMDGPURebuildSSALegacy::splitNonPhiValue(VNInfo *VNI, LiveInterval &LI,
Register OldVR) {
MachineInstr *DefMI = LIS->getInstructionFromIndex(VNI->def);
- int DefIdx = DefMI->findRegisterDefOperandIdx(OldVR, TRI, false, true);
- MachineOperand &MO = DefMI->getOperand(DefIdx);
+ int OpIdx = DefMI->findRegisterDefOperandIdx(OldVR, TRI, /*IsDead*/ false,
+ /*Overlaps*/ true);
+ MachineOperand &MO = DefMI->getOperand(OpIdx);
unsigned SubRegIdx = MO.getSubReg();
+
LaneBitmask Mask = SubRegIdx ? TRI->getSubRegIndexLaneMask(SubRegIdx)
: MRI->getMaxLaneMaskForVReg(MO.getReg());
const TargetRegisterClass *RC = TRI->getRegClassForOperandReg(*MRI, MO);
+
Register NewVR = MRI->createVirtualRegister(RC);
MO.setReg(NewVR);
MO.setSubReg(AMDGPU::NoRegister);
- MO.setIsUndef(false);
+ MO.setIsUndef(false); // keep partial-def semantics via subranges/uses
LIS->ReplaceMachineInstrInMaps(*DefMI, *DefMI);
- rewriteUses(DefMI, OldVR, Mask, NewVR, LI, VNI);
+#ifndef NDEBUG
+ LLVM_DEBUG({
+ dbgs() << "[SPLIT] def ";
+ DefMI->print(dbgs());
+ dbgs() << " lanes=" << PrintLaneMask(Mask) << " -> new vreg "
+ << printReg(NewVR) << '\n';
+ });
+#endif
+
+ rewriteUses(DefMI, OldVR, Mask, NewVR, LI, VNI);
LIS->createAndComputeVirtRegInterval(NewVR);
}
+//===----------------------------------------------------------------------===//
+// rewriteUses
+// For each use of OldVR reached by VNI:
+// * exact lane match → replace with NewVR,
+// * strict subset → keep subindex, swap vreg,
+// * super/mixed → build REG_SEQUENCE (OldVR for untouched lanes,
+// NewVR for rewritten lanes), extend liveness,
+// swap the operand.
+//===----------------------------------------------------------------------===//
+
void AMDGPURebuildSSALegacy::rewriteUses(MachineInstr *DefMI, Register OldVR,
- LaneBitmask MaskToRewrite, Register NewVR,
- LiveInterval &LI, VNInfo *VNI) {
+ LaneBitmask MaskToRewrite,
+ Register NewVR, LiveInterval &LI,
+ VNInfo *VNI) {
+ const TargetRegisterClass *NewRC = TRI->getRegClassForReg(*MRI, NewVR);
+
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "[RW] rewriting uses of " << printReg(OldVR)
+ << " lanes=" << PrintLaneMask(MaskToRewrite) << " with "
+ << printReg(NewVR) << '\n');
+#endif
+
for (MachineOperand &MO :
llvm::make_early_inc_range(MRI->use_operands(OldVR))) {
MachineInstr *UseMI = MO.getParent();
- if (DefMI == UseMI)
+ if (UseMI == DefMI)
continue;
- SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);
- if (UseMI->getParent() == DefMI->getParent()) {
- SlotIndex DefIdx = LIS->getInstructionIndex(*DefMI);
-
- if (DefIdx >= UseIdx) {
- if (MLI->isLoopHeader(UseMI->getParent()) && UseMI->isPHI()) {
- unsigned OpIdx = UseMI->getOperandNo(&MO);
- MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
- SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
- VNInfo *InV = LI.getVNInfoBefore(PredEnd);
-
- if (InV != VNI)
- continue;
- } else
- continue;
- }
- } else {
- if (UseMI->isPHI()) {
- unsigned OpIdx = UseMI->getOperandNo(&MO);
- MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
- SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
- VNInfo *InV = LI.getVNInfoBefore(PredEnd);
+ if (!reachedByThisVNI(LI, DefMI, UseMI, MO, VNI))
+ continue;
- if (InV != VNI)
- continue;
- } else if (!MDT->dominates(DefMI->getParent(), UseMI->getParent()))
- continue;
- }
- const TargetRegisterClass *NewRC = TRI->getRegClassForReg(*MRI, NewVR);
- const TargetRegisterClass *OpRC = TRI->getRegClassForOperandReg(*MRI, MO);
- LaneBitmask OpMask = MRI->getMaxLaneMaskForVReg(MO.getReg());
- if (MO.getSubReg()) {
- OpMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
- }
+ LaneBitmask OpMask = operandLaneMask(MO);
if ((OpMask & MaskToRewrite).none())
continue;
- if (isOfRegClass(getRegSubRegPair(MO), *NewRC, *MRI) &&
- OpMask == MaskToRewrite) {
+
+ const TargetRegisterClass *OpRC = TRI->getRegClassForOperandReg(*MRI, MO);
+
+ // 1) Exact match fast path.
+ if (OpMask == MaskToRewrite &&
+ isOfRegClass(getRegSubRegPair(MO), *NewRC, *MRI)) {
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << " [RW] exact -> " << printReg(NewVR) << " at ";
+ UseMI->print(dbgs()));
+#endif
MO.setReg(NewVR);
MO.setSubReg(AMDGPU::NoRegister);
+ continue;
+ }
+
+ // 2) Super/mixed vs subset split.
+ if ((OpMask & ~MaskToRewrite).any()) {
+ // SUPER/MIXED: build RS and swap.
+ SmallVector<LaneBitmask, 4> LanesToExtend;
+ SlotIndex RSIdx;
+ Register RSv = buildRSForSuperUse(UseMI, MO, OldVR, NewVR, MaskToRewrite,
+ LI, OpRC, RSIdx, LanesToExtend);
+ extendAt(LI, RSIdx, LanesToExtend);
+ MO.setReg(RSv);
+ MO.setSubReg(AMDGPU::NoRegister);
} else {
- if ((OpMask & ~MaskToRewrite).any()) {
- // super-register use
- LaneBitmask Mask = LaneBitmask::getNone();
- // We need to explicitly inform LIS that the subreg is live up to the
- // REG_SEQUENCE
- LaneBitmask SubRangeToExtend = LaneBitmask::getNone();
- Register DestReg = MRI->createVirtualRegister(OpRC);
- MachineBasicBlock::iterator IP(UseMI);
- if (UseMI->isPHI()) {
- unsigned OpIdx = UseMI->getOperandNo(&MO);
- MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
- IP = Pred->getFirstTerminator();
- }
- auto RS = BuildMI(*IP->getParent(), IP, IP->getDebugLoc(),
- TII->get(TargetOpcode::REG_SEQUENCE), DestReg);
- for (const LiveInterval::SubRange &SR : LI.subranges()) {
- // Does this sub-range contain *any* segment that refers to V ?
- if (SR.getVNInfoAt(UseIdx)) {
- Mask = SR.LaneMask; // this lane mask is live-out of Pred
- dbgs() << PrintLaneMask(Mask) << "\n";
- unsigned SubRegIdx = getSubRegIndexForLaneMask(Mask, TRI);
- if (Mask == MaskToRewrite)
- RS.addReg(NewVR).addImm(SubRegIdx);
- else {
- RS.addReg(OldVR, 0, SubRegIdx).addImm(SubRegIdx);
- // We only save the mask for those sub-regs which have not been
- // rewriten. For the rewiritten we will call the
- // createAndComputeLiveREgInterval afterwords.
- SubRangeToExtend = SR.LaneMask;
- }
- }
- }
- auto RSIdx = LIS->InsertMachineInstrInMaps(*RS);
- LIS->extendToIndices(LI, ArrayRef(RSIdx));
- for (auto &SR : LI.subranges()) {
- if (SR.LaneMask == SubRangeToExtend)
- LIS->extendToIndices(SR, ArrayRef(RSIdx));
- }
- MO.setReg(RS->getOperand(0).getReg());
- } else if ((OpMask & MaskToRewrite) == OpMask) {
- // sub-register use
- if (UseMI->isPHI()) {
- unsigned OpIdx = UseMI->getOperandNo(&MO);
- MachineBasicBlock *Pred = UseMI->getOperand(++OpIdx).getMBB();
- SlotIndex PredEnd = LIS->getMBBEndIdx(Pred);
- VNInfo *InV = LI.getVNInfoBefore(PredEnd);
-
- if (InV != VNI)
- continue;
- }
- unsigned SubRegIdx = MO.getSubReg();
- assert(SubRegIdx != AMDGPU::NoRegister &&
- "Sub-register must not be zero");
- MO.setReg(NewVR);
- MO.setSubReg(SubRegIdx);
- }
+ // SUBSET: keep subindex, swap vreg.
+ unsigned Sub = MO.getSubReg();
+ assert(Sub && "subset path requires a subregister use");
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << " [RW] subset sub" << Sub << " -> "
+ << printReg(NewVR) << " at ";
+ UseMI->print(dbgs()));
+#endif
+ MO.setReg(NewVR);
+ MO.setSubReg(Sub);
}
}
}
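
The lane-mask dispatch above reduces to comparing the operand's lane mask against the lanes covered by the new definition. Below is a minimal standalone sketch of that classification, using plain uint64_t masks in place of LaneBitmask; the enum and function names are illustrative only and not part of the patch, and the real exact-match path additionally checks the register class.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Classify a use operand's lanes against the lanes rewritten to the new vreg.
enum class RewriteKind { Untouched, Exact, Subset, SuperOrMixed };

RewriteKind classifyUse(uint64_t OpMask, uint64_t MaskToRewrite) {
  if ((OpMask & MaskToRewrite) == 0)
    return RewriteKind::Untouched;    // no overlap: leave the operand alone
  if (OpMask == MaskToRewrite)
    return RewriteKind::Exact;        // swap the vreg, drop the subreg index
  if ((OpMask & ~MaskToRewrite) != 0)
    return RewriteKind::SuperOrMixed; // needs a REG_SEQUENCE of old and new lanes
  return RewriteKind::Subset;         // keep the subreg index, swap the vreg
}

int main() {
  // Illustrative masks: sub0 = 0x3, sub1 = 0xC, full register = 0xF.
  assert(classifyUse(0x3, 0x3) == RewriteKind::Exact);
  assert(classifyUse(0x3, 0xF) == RewriteKind::Subset);
  assert(classifyUse(0xF, 0x3) == RewriteKind::SuperOrMixed);
  assert(classifyUse(0xC, 0x3) == RewriteKind::Untouched);
  std::puts("lane-mask classification ok");
  return 0;
}
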
+//===----------------------------------------------------------------------===//
+// runOnMachineFunction
+// Walk all vregs, build a dominance-ordered worklist of main-range VNs.
+// First materialize PHIs (post-dominance order), then split non-PHI values.
+// Optionally prune LI afterwards.
+//===----------------------------------------------------------------------===//
+
bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
LIS = &getAnalysis<LiveIntervalsWrapperPass>().getLIS();
MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
@@ -582,166 +423,98 @@ bool AMDGPURebuildSSALegacy::runOnMachineFunction(MachineFunction &MF) {
if (MRI->isSSA())
return false;
- CrossBlockVRegs.clear();
- DefBlocks.clear();
- LiveInBlocks.clear();
- PHINodes.clear();
- DefSeen.clear();
- Renamed.clear();
- Visited.clear();
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "\n=== AMDGPURebuildSSALegacy on " << MF.getName()
+ << " ===\n");
+#endif
DenseSet<Register> Processed;
for (auto &B : MF) {
for (auto &I : B) {
for (auto Def : I.defs()) {
- if (Def.isReg() && Def.getReg().isVirtual()) {
- Register VReg = Def.getReg();
- if (!LIS->hasInterval(VReg) || !Processed.insert(VReg).second)
- continue;
- auto &LI = LIS->getInterval(VReg);
- if (LI.getNumValNums() == 1)
- continue;
-
- SmallVector<VNInfo *, 8> WorkList;
- for (VNInfo *V : LI.vnis())
- // for (const LiveInterval::SubRange &SR : LI.subranges())
- // for (auto V : SR.vnis())
- if (V && !V->isUnused())
- WorkList.push_back(V);
-
- auto DomKey = [&](VNInfo *V) {
- MachineBasicBlock *BB = LIS->getMBBFromIndex(V->def);
- // DomTree preorder index (DFS number) – cheaper than repeated
- // dominates()
- static DenseMap<MachineBasicBlock *, unsigned> Num;
- if (Num.empty()) {
- unsigned N = 0;
- for (auto *Node : depth_first(MDT->getRootNode()))
- Num[Node->getBlock()] = N++;
- }
- return std::pair{Num[BB], V->def}; // tie-break with SlotIndex
- };
-
- llvm::sort(WorkList, [&](VNInfo *A, VNInfo *B) {
- return DomKey(A) < DomKey(B); // strict weak order
- });
-
- for (auto V : WorkList) {
- dbgs() << "id: " << V->id << " Def: " << V->def
- << " isPHI: " << V->isPHIDef() << "\n";
- }
-
-
- // --- the root is now Work[0] ---
- VNInfo *Root = WorkList.front(); // dominator of all others
- // 2. stable-partition: PHIs (except root) to the front
- auto IsPhi = [&](VNInfo *V) { return V != Root && V->isPHIDef(); };
- auto Mid =
- std::stable_partition(WorkList.begin(), WorkList.end(), IsPhi);
-
- // 3. Phase A: build real PHIs, leave incoming defs unchanged
- auto PHISlice =
- llvm::ArrayRef(WorkList).take_front(Mid - WorkList.begin());
- for (auto It = PHISlice.rbegin(); It != PHISlice.rend(); ++It) {
- // Add PHIs in post-dominating order
- buildRealPHI(*It, LI, VReg);
- }
+ if (!Def.isReg() || !Def.getReg().isVirtual())
+ continue;
+
+ Register VReg = Def.getReg();
+ if (!LIS->hasInterval(VReg) || !Processed.insert(VReg).second)
+ continue;
+
+ LiveInterval &LI = LIS->getInterval(VReg);
+ if (LI.getNumValNums() == 1)
+ continue;
+
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "\n[VREG] " << printReg(VReg) << " has "
+ << LI.getNumValNums() << " VNs\n");
+#endif
- // 4. Phase B: split the remaining VNIs
- for (VNInfo *VNI : llvm::ArrayRef(WorkList).slice(Mid - WorkList.begin())) {
- if (VNI == Root)
- continue; // never touch the dominating root
- splitNonPhiValue(VNI, LI, VReg);
+ // 1) Build worklist from the main range (1 VN per def site).
+ SmallVector<VNInfo *, 8> WorkList;
+ for (VNInfo *V : LI.vnis())
+ if (V && !V->isUnused())
+ WorkList.push_back(V);
+
+          // 2) Sort by (dom-preorder, SlotIndex). Cache the dominator-tree
+          // preorder (DFS) numbering locally; a static cache here would carry
+          // stale block numbering across machine functions.
+          DenseMap<MachineBasicBlock *, unsigned> DomNum;
+          auto DomKey = [&](VNInfo *V) {
+            MachineBasicBlock *BB = LIS->getMBBFromIndex(V->def);
+            if (DomNum.empty()) {
+              unsigned N = 0;
+              for (auto *Node : depth_first(MDT->getRootNode()))
+                DomNum[Node->getBlock()] = N++;
+            }
+            return std::pair{DomNum[BB], V->def};
+          };
+ llvm::sort(WorkList,
+ [&](VNInfo *A, VNInfo *B) { return DomKey(A) < DomKey(B); });
- // 5. single clean-up
- // LIS->shrinkToUses(&LI);
- LI.RenumberValues();
+#ifndef NDEBUG
+ LLVM_DEBUG({
+ dbgs() << " [WL] order:\n";
+ for (VNInfo *V : WorkList)
+ dbgs() << " id=" << V->id << " def=" << V->def
+ << (V->isPHIDef() ? " (phi)\n" : "\n");
+ });
+#endif
+
+ // 3) Root dominates all others. Process PHIs first (post-dominating
+ // order).
+ VNInfo *Root = WorkList.front();
+ auto IsPhi = [&](VNInfo *V) { return V != Root && V->isPHIDef(); };
+ auto Mid =
+ std::stable_partition(WorkList.begin(), WorkList.end(), IsPhi);
+
+ auto PHISlice =
+ llvm::ArrayRef(WorkList).take_front(Mid - WorkList.begin());
+ for (auto It = PHISlice.rbegin(); It != PHISlice.rend(); ++It)
+ buildRealPHI(*It, LI, VReg);
+
+ // 4) Then split remaining non-PHI values, skipping the dominating root.
+ for (VNInfo *VNI :
+ llvm::ArrayRef(WorkList).slice(Mid - WorkList.begin())) {
+ if (VNI == Root)
+ continue;
+ splitNonPhiValue(VNI, LI, VReg);
}
+
+          // 5) Single clean-up. Keep the prune optional; leave IsUndef on
+          // partial defs.
+          // LIS->shrinkToUses(&LI);
+ // FIXME: For some reason shrinkToUses makes REG_SEQUENCE use
+ // definitions dead!
+ LI.RenumberValues();
}
}
}
Processed.clear();
- // // Collect all cross-block virtual registers.
- // // This includes registers that are live-in to the function, and registers
- // // that are defined in multiple blocks.
- // // We will insert PHI nodes for these registers.
- // collectCrossBlockVRegs(MF);
-
- // LLVM_DEBUG(dbgs() << "##### Virt regs live cross block ##################\n";
- // for (auto VMP : CrossBlockVRegs) { dbgs() << printVMP(VMP) << " "; });
-
- // for (auto VMP : CrossBlockVRegs) {
- // SmallVector<MachineBasicBlock *> PHIBlocks;
- // LiveInterval &LI = LIS->getInterval(VMP.getVReg());
- // if (LI.hasSubRanges()) {
- // for (const LiveInterval::SubRange &SR : LI.subranges()) {
- // LaneBitmask Mask = SR.LaneMask;
- // if ((Mask & VMP.getLaneMask()) == VMP.getLaneMask()) {
- // for (auto &MBB : MF) {
- // if (SR.liveAt(LIS->getMBBStartIdx(&MBB)))
- // LiveInBlocks[VMP].insert(&MBB);
- // }
- // }
- // }
- // } else {
- // for (auto &MBB : MF) {
- // if (LI.liveAt(LIS->getMBBStartIdx(&MBB)))
- // LiveInBlocks[VMP].insert(&MBB);
- // }
- // }
-
- // SmallPtrSet<MachineBasicBlock *, 8> Defs;
- // for(auto E : DefBlocks) {
- // auto V = E.first;
- // if (V.getVReg() == VMP.getVReg()) {
- // if ((V.getLaneMask() & VMP.getLaneMask()) == VMP.getLaneMask()) {
- // Defs.insert(E.second.begin(), E.second.end());
- // }
- // }
- // }
-
- // LLVM_DEBUG(
- // dbgs() << "findPHINodesPlacement input:\nVreg: "
- // << printVMP(VMP)
- // << "\n";
- // dbgs() << "Def Blocks: \n"; for (auto MBB
- // : Defs) {
- // dbgs() << "MBB_" << MBB->getNumber() << " ";
- // } dbgs() << "\nLiveIn Blocks: \n";
- // for (auto MBB
- // : LiveInBlocks[VMP]) {
- // dbgs() << "MBB_" << MBB->getNumber() << " ";
- // } dbgs()
- // << "\n");
-
- // findPHINodesPlacement(LiveInBlocks[VMP], Defs, PHIBlocks);
- // LLVM_DEBUG(dbgs() << "\nBlocks to insert PHI nodes:\n"; for (auto MBB
- // : PHIBlocks) {
- // dbgs() << "MBB_" << MBB->getNumber() << " ";
- // } dbgs() << "\n");
- // for (auto MBB : PHIBlocks) {
- // if (!PHINodes[MBB->getNumber()].contains(VMP)) {
- // // Insert PHI for VReg. Don't use new VReg here as we'll replace them
- // // in renaming phase.
- // printVMP(VMP);
- // auto PHINode =
- // BuildMI(*MBB, MBB->begin(), DebugLoc(), TII->get(TargetOpcode::PHI))
- // .addReg(VMP.getVReg(), RegState::Define, VMP.getSubReg(MRI, TRI));
- // PHINodes[MBB->getNumber()].insert(VMP);
- // PHIMap[PHINode] = VMP;
- // }
- // }
- // }
-
- // // Rename virtual registers in the basic block.
- // DenseMap<unsigned, VRegDefStack> VregNames;
- // renameVRegs(MF.front(), VregNames);
MF.getProperties().set(MachineFunctionProperties::Property::IsSSA);
- MF.getProperties().reset(MachineFunctionProperties::Property ::NoPHIs);
+ MF.getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
+
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "=== verify ===\n");
+#endif
MF.verify();
return MRI->isSSA();
}
>From 4c2e3c81a2463014f35c623ed56201db4e18a0f3 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 29 Aug 2025 14:30:23 +0000
Subject: [PATCH 45/46] Next Use Analysis testing infrastructure skeleton
---
.../Target/AMDGPU/AMDGPUNextUseAnalysis.cpp | 103 +++++++++++++++++-
.../lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h | 3 +
.../CodeGen/AMDGPU/NextUseAnalysis/README.md | 33 ++++++
.../NextUseAnalysis/basic-distances.mir | 58 ++++++++++
.../AMDGPU/NextUseAnalysis/dead-registers.mir | 28 +++++
.../NextUseAnalysis/multiblock-distances.mir | 37 +++++++
.../NextUseAnalysis/subreg-distances.mir | 29 +++++
.../NextUseAnalysis/subreg-interference.mir | 39 +++++++
8 files changed, 329 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/README.md
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/dead-registers.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/multiblock-distances.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-distances.mir
create mode 100644 llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-interference.mir
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
index ff432233f40e4..0c2feca1e7d8f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.cpp
@@ -170,6 +170,8 @@ void NextUseResult::analyze(const MachineFunction &MF) {
}
}
dumpUsedInBlock();
+ // Dump complete analysis results for testing
+ LLVM_DEBUG(dumpAllNextUseDistances(MF));
T1->stopTimer();
LLVM_DEBUG(TG->print(llvm::errs()));
}
@@ -332,4 +334,103 @@ void AMDGPUNextUseAnalysisWrapper::getAnalysisUsage(
AMDGPUNextUseAnalysisWrapper::AMDGPUNextUseAnalysisWrapper()
: MachineFunctionPass(ID) {
initializeAMDGPUNextUseAnalysisWrapperPass(*PassRegistry::getPassRegistry());
-}
\ No newline at end of file
+}
+void NextUseResult::dumpAllNextUseDistances(const MachineFunction &MF) {
+ LLVM_DEBUG(dbgs() << "=== NextUseAnalysis Results for " << MF.getName() << " ===\n");
+
+ for (const auto &MBB : MF) {
+ unsigned MBBNum = MBB.getNumber();
+ LLVM_DEBUG(dbgs() << "\n--- MBB_" << MBBNum << " ---\n");
+
+ if (!NextUseMap.contains(MBBNum)) {
+ LLVM_DEBUG(dbgs() << " No analysis data for this block\n");
+ continue;
+ }
+
+ const NextUseInfo &Info = NextUseMap.at(MBBNum);
+
+ // Process each instruction in the block
+    for (const MachineInstr &MI : MBB) {
+
+ // Print instruction
+ LLVM_DEBUG(dbgs() << " Instr: ");
+ LLVM_DEBUG(MI.print(dbgs(), /*IsStandalone=*/false, /*SkipOpers=*/false,
+ /*SkipDebugLoc=*/true, /*AddNewLine=*/false));
+ LLVM_DEBUG(dbgs() << "\n");
+
+ // Print distances at this instruction
+ if (Info.InstrDist.contains(&MI)) {
+ const VRegDistances &Dists = Info.InstrDist.at(&MI);
+ LLVM_DEBUG(dbgs() << " Next-use distances:\n");
+
+ for (const auto &VRegEntry : Dists) {
+ unsigned VReg = VRegEntry.getFirst();
+ const auto &Records = VRegEntry.getSecond();
+
+ for (const auto &Record : Records) {
+ LaneBitmask LaneMask = Record.first;
+ unsigned Distance = Record.second;
+
+ LLVM_DEBUG(dbgs() << " ");
+
+ // Print register with sub-register if applicable
+ LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(VReg);
+ if (LaneMask != FullMask) {
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(LaneMask, TRI);
+ LLVM_DEBUG(dbgs() << printReg(VReg, TRI, SubRegIdx, MRI));
+ } else {
+ LLVM_DEBUG(dbgs() << printReg(VReg, TRI));
+ }
+
+ if (Distance == Infinity) {
+ LLVM_DEBUG(dbgs() << " -> DEAD (infinite distance)\n");
+ } else {
+ LLVM_DEBUG(dbgs() << " -> " << Distance << " instructions\n");
+ }
+ }
+ }
+
+ if (Dists.size() == 0) {
+ LLVM_DEBUG(dbgs() << " (no register uses)\n");
+ }
+ } else {
+ LLVM_DEBUG(dbgs() << " (no distance data)\n");
+ }
+ }
+
+ // Print distances at end of block
+ LLVM_DEBUG(dbgs() << " Block End Distances:\n");
+ for (const auto &VRegEntry : Info.Bottom) {
+ unsigned VReg = VRegEntry.getFirst();
+ const auto &Records = VRegEntry.getSecond();
+
+ for (const auto &Record : Records) {
+ LaneBitmask LaneMask = Record.first;
+ unsigned Distance = Record.second;
+
+ LLVM_DEBUG(dbgs() << " ");
+
+ LaneBitmask FullMask = MRI->getMaxLaneMaskForVReg(VReg);
+ if (LaneMask != FullMask) {
+ unsigned SubRegIdx = getSubRegIndexForLaneMask(LaneMask, TRI);
+ LLVM_DEBUG(dbgs() << printReg(VReg, TRI, SubRegIdx, MRI));
+ } else {
+ LLVM_DEBUG(dbgs() << printReg(VReg, TRI));
+ }
+
+ if (Distance == Infinity) {
+ LLVM_DEBUG(dbgs() << " -> DEAD\n");
+ } else {
+ LLVM_DEBUG(dbgs() << " -> " << Distance << "\n");
+ }
+ }
+ }
+
+ if (Info.Bottom.size() == 0) {
+ LLVM_DEBUG(dbgs() << " (no registers live at block end)\n");
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "\n=== End NextUseAnalysis Results ===\n");
+}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
index ca9b3f837ef85..22536f5de6221 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUNextUseAnalysis.h
@@ -275,6 +275,9 @@ class NextUseResult {
}
void dumpUsedInBlock();
+
+ /// Dump complete next-use analysis results for testing
+ void dumpAllNextUseDistances(const MachineFunction &MF);
};
class AMDGPUNextUseAnalysis : public AnalysisInfoMixin<AMDGPUNextUseAnalysis> {
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/README.md b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/README.md
new file mode 100644
index 0000000000000..ce4dd224853ac
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/README.md
@@ -0,0 +1,33 @@
+# AMDGPU NextUseAnalysis Tests
+
+This directory contains MIR tests for the sub-register-aware (V2) AMDGPU
+NextUseAnalysis implementation. The tests drive the analysis with
+`llc -run-pass` and match its `-debug-only` output, so they require an
+assertions-enabled build of LLVM.
+
+## Running Tests
+
+### Individual Test
+```bash
+cd build/Debug
+./bin/llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use \
+ ../../llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir -o /dev/null 2>&1 | \
+ ./bin/FileCheck ../../llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir
+```
+
+### All Tests
+```bash
+cd build/Debug
+for test in ../../llvm/test/CodeGen/AMDGPU/NextUseAnalysis/*.mir; do
+ echo "Testing: $test"
+ ./bin/llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use \
+ "$test" -o /dev/null 2>&1 | ./bin/FileCheck "$test" && echo "PASS" || echo "FAIL"
+done
+```
+
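+### All Tests via llvm-lit
+
+A minimal alternative to the loop above, assuming the `llvm-lit` wrapper was
+generated in the build directory and LLVM was built with assertions enabled:
+
+```bash
+cd build/Debug
+./bin/llvm-lit -v ../../llvm/test/CodeGen/AMDGPU/NextUseAnalysis/
+```
+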
+## Test Categories
+
+1. **basic-distances.mir** - Fundamental distance calculations
+2. **subreg-distances.mir** - Sub-register handling
+3. **multiblock-distances.mir** - Control flow analysis
+4. **dead-registers.mir** - Dead register detection
+5. **subreg-interference.mir** - Advanced sub-register interference
+
+All tests validate the V2 implementation's sub-register-aware analysis capabilities.
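+
+## Expected Debug Output Shape
+
+The CHECK lines in these tests match the textual dump produced by
+`dumpAllNextUseDistances`. As a rough sketch of that format (the register
+numbers and distances below are illustrative only, and indentation is
+approximate), each function dump looks like:
+
+```text
+=== NextUseAnalysis Results for <function> ===
+
+--- MBB_0 ---
+  Instr: %1:vgpr_32 = V_MOV_B32_e32 100, implicit $exec
+    Next-use distances:
+      %0 -> 2 instructions
+  Block End Distances:
+    (no registers live at block end)
+
+=== End NextUseAnalysis Results ===
+```
+
+Registers with no remaining uses are printed with `-> DEAD`.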
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir
new file mode 100644
index 0000000000000..c706ca44ead8d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/basic-distances.mir
@@ -0,0 +1,58 @@
+# NOTE: Basic next-use distance calculation test
+# REQUIRES: asserts
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use %s -o /dev/null 2>&1 | FileCheck %s
+
+---
+name: basic_distances
+alignment: 1
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+body: |
+ bb.0:
+ ; Test basic distance calculation
+ ; %0 is used 2 instructions later, then %1 is used immediately, etc.
+ %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 100, implicit $exec
+ %2:vgpr_32 = V_ADD_F32_e32 %1, %1, implicit $exec, implicit $mode
+ %3:vgpr_32 = V_ADD_F32_e32 %0, %2, implicit $exec, implicit $mode
+ S_ENDPGM 0
+
+# CHECK: === NextUseAnalysis Results for basic_distances ===
+# CHECK: --- MBB_0 ---
+
+# First instruction: %0 definition - no incoming register uses
+# CHECK: Instr: %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+# CHECK-NEXT: Next-use distances:
+# CHECK-NEXT: (no register uses)
+
+# Second instruction: %1 definition - %0 will be used in 2 instructions
+# CHECK: Instr: %1:vgpr_32 = V_MOV_B32_e32 100, implicit $exec
+# CHECK-NEXT: Next-use distances:
+# CHECK-NEXT: %0 -> 2 instructions
+
+# Third instruction: %2 definition using %1 twice - %0 in 1 instruction, %1 immediate use
+# CHECK: Instr: %2:vgpr_32 = V_ADD_F32_e32 %1, %1, implicit $exec, implicit $mode
+# CHECK-NEXT: Next-use distances:
+# CHECK-NEXT: %0 -> 1 instructions
+# CHECK-NEXT: %1 -> 0 instructions
+
+# Fourth instruction: %3 definition using %0 and %2 - both immediate use
+# CHECK: Instr: %3:vgpr_32 = V_ADD_F32_e32 %0, %2, implicit $exec, implicit $mode
+# CHECK-NEXT: Next-use distances:
+# CHECK-NEXT: %0 -> 0 instructions
+# CHECK-NEXT: %2 -> 0 instructions
+
+# Final instruction: no register uses
+# CHECK: Instr: S_ENDPGM 0
+# CHECK-NEXT: Next-use distances:
+# CHECK-NEXT: (no register uses)
+
+# Block end: no live registers
+# CHECK: Block End Distances:
+# CHECK-NEXT: (no registers live at block end)
+
+# CHECK: === End NextUseAnalysis Results ===
+...
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/dead-registers.mir b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/dead-registers.mir
new file mode 100644
index 0000000000000..c3db7bd9a7d00
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/dead-registers.mir
@@ -0,0 +1,28 @@
+# NOTE: Dead register detection test
+# REQUIRES: asserts
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use %s -o /dev/null 2>&1 | FileCheck %s
+
+---
+name: dead_registers
+alignment: 1
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+body: |
+ bb.0:
+ ; %0 is defined but never used - should be DEAD
+ %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+ %1:vgpr_32 = V_MOV_B32_e32 100, implicit $exec
+ %2:vgpr_32 = V_ADD_F32_e32 %1, %1, implicit $exec, implicit $mode
+ S_ENDPGM 0
+
+# CHECK: === NextUseAnalysis Results for dead_registers ===
+# CHECK: --- MBB_0 ---
+
+# %0 should be considered dead since it's never used
+# CHECK: Block End Distances:
+# Look for either DEAD or very high distance for %0
+
+# CHECK: === End NextUseAnalysis Results ===
+...
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/multiblock-distances.mir b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/multiblock-distances.mir
new file mode 100644
index 0000000000000..c82d0e8265b70
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/multiblock-distances.mir
@@ -0,0 +1,37 @@
+# NOTE: Multi-block next-use distance calculation test
+# REQUIRES: asserts
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use %s -o /dev/null 2>&1 | FileCheck %s
+
+---
+name: multiblock_distances
+alignment: 1
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: sreg_32 }
+body: |
+ bb.0:
+ %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+ %3:sreg_32 = S_MOV_B32 1
+ S_CMP_EQ_U32 %3, 0, implicit-def $scc
+ S_CBRANCH_SCC1 %bb.2, implicit $scc
+ S_BRANCH %bb.1
+
+ bb.1:
+ %1:vgpr_32 = V_ADD_F32_e32 %0, %0, implicit $exec, implicit $mode
+ S_BRANCH %bb.2
+
+ bb.2:
+ %2:vgpr_32 = V_MOV_B32_e32 %0, implicit $exec
+ S_ENDPGM 0
+
+# CHECK: === NextUseAnalysis Results for multiblock_distances ===
+
+# Check that we get analysis for all blocks
+# CHECK: --- MBB_0 ---
+# CHECK: --- MBB_1 ---
+# CHECK: --- MBB_2 ---
+
+# CHECK: === End NextUseAnalysis Results ===
+...
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-distances.mir b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-distances.mir
new file mode 100644
index 0000000000000..2ddcbea5d3f1c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-distances.mir
@@ -0,0 +1,29 @@
+# NOTE: Sub-register next-use distance calculation test
+# REQUIRES: asserts
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use %s -o /dev/null 2>&1 | FileCheck %s
+
+---
+name: subreg_distances
+alignment: 1
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+body: |
+ bb.0:
+    ; Test sub-register usage patterns:
+    ; %0 is a 64-bit register built from %1 and %2; its sub-registers are
+    ; then read individually.
+    %1:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+    %2:vgpr_32 = V_MOV_B32_e32 7, implicit $exec
+    %0:vreg_64 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1
+    %3:vgpr_32 = COPY %0.sub0
+    S_ENDPGM 0
+
+# CHECK: === NextUseAnalysis Results for subreg_distances ===
+# CHECK: --- MBB_0 ---
+
+# The test checks that sub-register analysis works correctly
+# CHECK: Instr: %1:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
+# CHECK: Instr: %0:vreg_64 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1
+# CHECK: Instr: %3:vgpr_32 = COPY %0.sub0
+# CHECK: === End NextUseAnalysis Results ===
+...
diff --git a/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-interference.mir b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-interference.mir
new file mode 100644
index 0000000000000..a1e9a8ca8b48e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/NextUseAnalysis/subreg-interference.mir
@@ -0,0 +1,39 @@
+# NOTE: Sub-register interference resolution test
+# This tests the V2 capability to handle disjoint sub-register usage without false interference
+# REQUIRES: asserts
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -run-pass=amdgpu-next-use -debug-only=amdgpu-next-use %s -o /dev/null 2>&1 | FileCheck %s
+
+---
+name: subreg_interference
+alignment: 1
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_128 }
+ - { id: 1, class: vgpr_32 }
+ - { id: 2, class: vgpr_32 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: vgpr_32 }
+body: |
+ bb.0:
+    ; Create a 128-bit register from four 32-bit defs.
+    ; Lower 64 bits (sub0_sub1) and upper 64 bits (sub2_sub3) should not interfere
+    %1:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+    %2:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+    %3:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
+    %4:vgpr_32 = V_MOV_B32_e32 4, implicit $exec
+    %0:vreg_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2, %4, %subreg.sub3
+
+    ; Use only lower 64 bits - should not interfere with upper bits
+    %5:vgpr_32 = COPY %0.sub0
+    %6:vgpr_32 = COPY %0.sub1
+
+    ; Later use upper 64 bits - should show separate distance tracking
+    %7:vgpr_32 = COPY %0.sub2
+    %8:vgpr_32 = COPY %0.sub3
+
+ S_ENDPGM 0
+
+# CHECK: === NextUseAnalysis Results for subreg_interference ===
+# CHECK: --- MBB_0 ---
+
+# The V2 implementation should track sub-register lanes separately
+# Look for lane mask information in the output
+# CHECK: Next-use distances:
+
+# CHECK: === End NextUseAnalysis Results ===
+...
>From 4ca05d4c1dbf69cff8f3fae876ad01e64ccd75f2 Mon Sep 17 00:00:00 2001
From: alex-t <alexander.timofeev at amd.com>
Date: Fri, 29 Aug 2025 15:06:32 +0000
Subject: [PATCH 46/46] bulk merge error fixed
---
llvm/unittests/CodeGen/CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/unittests/CodeGen/CMakeLists.txt b/llvm/unittests/CodeGen/CMakeLists.txt
index 9d45945704409..876cee7677267 100644
--- a/llvm/unittests/CodeGen/CMakeLists.txt
+++ b/llvm/unittests/CodeGen/CMakeLists.txt
@@ -49,6 +49,7 @@ add_llvm_unittest(CodeGenTests
TestAsmPrinter.cpp
MLRegAllocDevelopmentFeatures.cpp
VRegMaskPairTest.cpp
+ X86MCInstLowerTest.cpp
)
add_subdirectory(GlobalISel)