[llvm] [WIP][AMDGPU] Optimized SGPR spills into VGPR lanes for non-interfering spill regions. (PR #93506)

Vikash Gupta via llvm-commits llvm-commits at lists.llvm.org
Mon May 27 23:51:12 PDT 2024


https://github.com/vg0204 created https://github.com/llvm/llvm-project/pull/93506

1. Added the StackSlotColoring pass in the AMDGPU pass pipeline, just after SGPR register allocation is done, in order to optimize the usage of stack slots for SGPR spills.

2. It will eventually lead to the reuse of VGPR lanes for those SGPR spills which do not interfere with each other, while lowering SGPR spills in the SILowerSGPRSpills pass.

3. In order to facilitate this optimization, some additional analysis results need to be preserved throughout both phases of SGPR and VGPR register allocation (within the StackSlotColoring pass).

4. Also, while doing so, resolved a bug in the StackSlotColoring pass related to SlotIndex updates.

5. Tested out some basic test cases successfully. 

>From 4593814ee2b6862749dc2d074a3efda340c3f7c7 Mon Sep 17 00:00:00 2001
From: vg0204 <Vikash.Gupta at amd.com>
Date: Mon, 27 May 2024 17:44:27 +0530
Subject: [PATCH 1/5] Implemented a patch to optimize SGPR spills.

Introduced the StackSlotColoring pass after SGPR RegAlloc and spill to optimize stack slot reuse. In the process, found and resolved a StackSlotColoring bug which was preventing this before. Tested it on a few basic test cases; tests are yet to be added to AMDGPU's test folder.
---
 llvm/lib/CodeGen/StackSlotColoring.cpp        | 10 +++++++++-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |  3 +++
 llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp  | 20 ++++++++++++++++---
 llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp     |  6 ++++--
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index 9fdc8a338b52a..c3e95ed9a3909 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -64,6 +64,7 @@ namespace {
     MachineFrameInfo *MFI = nullptr;
     const TargetInstrInfo *TII = nullptr;
     const MachineBlockFrequencyInfo *MBFI = nullptr;
+    SlotIndexes *Indexes = nullptr;
 
     // SSIntervals - Spill slot intervals.
     std::vector<LiveInterval*> SSIntervals;
@@ -496,8 +497,14 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     ++I;
   }
 
-  for (MachineInstr *MI : toErase)
+  /// BUG: As this pass preserves SlotIndexesAnalysis result, any
+  /// addition/removal of MI needs corresponding update in SlotIndexAnalysis,
+  /// not done yet. FIXED: Added needed changes to ensure any pass after this
+  /// pass using SLotIndexAnalysis result get correct SlotIndexEntries.
+  for (MachineInstr *MI : toErase) {
     MI->eraseFromParent();
+    Indexes->removeMachineInstrFromMaps(*MI);
+  }
 
   return changed;
 }
@@ -515,6 +522,7 @@ bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
   TII = MF.getSubtarget().getInstrInfo();
   LS = &getAnalysis<LiveStacks>();
   MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+  Indexes = &getAnalysis<SlotIndexes>();
 
   bool Changed = false;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 20329dea60275..917af914d8357 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1398,6 +1398,9 @@ bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
   // since FastRegAlloc does the replacements itself.
   addPass(createVirtRegRewriter(false));
 
+  // Optimizes SGPR spills into VGPR lanes for non-interferring spill-ranges.
+  addPass(&StackSlotColoringID);
+
   // Equivalent of PEI for SGPRs.
   addPass(&SILowerSGPRSpillsID);
   addPass(&SIPreAllocateWWMRegsID);
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index b6a0152f6fa83..9e121b47ad3fb 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -52,7 +52,8 @@ class SILowerSGPRSpills : public MachineFunctionPass {
   void calculateSaveRestoreBlocks(MachineFunction &MF);
   bool spillCalleeSavedRegs(MachineFunction &MF,
                             SmallVectorImpl<int> &CalleeSavedFIs);
-  void extendWWMVirtRegLiveness(MachineFunction &MF, LiveIntervals *LIS);
+  void extendWWMVirtRegLiveness(MachineFunction &MF, SlotIndexes *Indexes,
+                                LiveIntervals *LIS);
 
   bool runOnMachineFunction(MachineFunction &MF) override;
 
@@ -260,6 +261,7 @@ bool SILowerSGPRSpills::spillCalleeSavedRegs(
 }
 
 void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
+                                                 SlotIndexes *Indexes,
                                                  LiveIntervals *LIS) {
   // TODO: This is a workaround to avoid the unmodelled liveness computed with
   // whole-wave virtual registers when allocated together with the regular VGPR
@@ -278,14 +280,21 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
   for (auto Reg : MFI->getSGPRSpillVGPRs()) {
     for (MachineBasicBlock *SaveBlock : SaveBlocks) {
       MachineBasicBlock::iterator InsertBefore = SaveBlock->begin();
+      MachineInstrSpan MIS(InsertBefore, SaveBlock);
+
       DebugLoc DL = SaveBlock->findDebugLoc(InsertBefore);
       auto MIB = BuildMI(*SaveBlock, InsertBefore, DL,
                          TII->get(AMDGPU::IMPLICIT_DEF), Reg);
       MFI->setFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
       // Set SGPR_SPILL asm printer flag
       MIB->setAsmPrinterFlag(AMDGPU::SGPR_SPILL);
+
       if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        assert(std::distance(MIS.begin(), InsertBefore) == 1);
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
       }
     }
   }
@@ -300,8 +309,13 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
       auto MIB = BuildMI(*RestoreBlock, InsertBefore, DL,
                          TII->get(TargetOpcode::KILL));
       MIB.addReg(Reg);
-      if (LIS)
+
+      if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
+      }
     }
   }
 }
@@ -392,7 +406,7 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
     }
 
     if (SpilledToVirtVGPRLanes) {
-      extendWWMVirtRegLiveness(MF, LIS);
+      extendWWMVirtRegLiveness(MF, Indexes, LIS);
       if (LIS) {
         // Compute the LiveInterval for the newly created virtual registers.
         for (auto Reg : FuncInfo->getSGPRSpillVGPRs())
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index ddb5f71935685..80a720fbed27a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1775,8 +1775,10 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int Index,
 
   if (SpillToVGPR) {
 
-    assert(SB.NumSubRegs == VGPRSpills.size() &&
-           "Num of VGPR lanes should be equal to num of SGPRs spilled");
+    assert(SB.NumSubRegs <= VGPRSpills.size() &&
+           "Num of VGPR lanes should be greater or equal to num of SGPRs "
+           "spilled, as Stack Slot Coloring pass assigns different SGPR spills "
+           "into same stack slots");
 
     for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
       Register SubReg =

>From 7af4fd6f4650a750ba0c02c838afde0142f24761 Mon Sep 17 00:00:00 2001
From: vg0204 <Vikash.Gupta at amd.com>
Date: Mon, 27 May 2024 17:44:27 +0530
Subject: [PATCH 2/5] Implemented a patch to optimize SGPR spills.

Introduced the StackSlotColoring pass after SGPR RegAlloc and spill to optimize stack slot reuse. In the process, found and resolved a StackSlotColoring bug which was preventing this before. Tested it on a few basic test cases; tests are yet to be added to AMDGPU's test folder.
---
 llvm/lib/CodeGen/StackSlotColoring.cpp        | 10 +++++++++-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |  3 +++
 llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp  | 20 ++++++++++++++++---
 llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp     |  6 ++++--
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index 9fdc8a338b52a..c3e95ed9a3909 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -64,6 +64,7 @@ namespace {
     MachineFrameInfo *MFI = nullptr;
     const TargetInstrInfo *TII = nullptr;
     const MachineBlockFrequencyInfo *MBFI = nullptr;
+    SlotIndexes *Indexes = nullptr;
 
     // SSIntervals - Spill slot intervals.
     std::vector<LiveInterval*> SSIntervals;
@@ -496,8 +497,14 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     ++I;
   }
 
-  for (MachineInstr *MI : toErase)
+  /// BUG: As this pass preserves SlotIndexesAnalysis result, any
+  /// addition/removal of MI needs corresponding update in SlotIndexAnalysis,
+  /// not done yet. FIXED: Added needed changes to ensure any pass after this
+  /// pass using SLotIndexAnalysis result get correct SlotIndexEntries.
+  for (MachineInstr *MI : toErase) {
     MI->eraseFromParent();
+    Indexes->removeMachineInstrFromMaps(*MI);
+  }
 
   return changed;
 }
@@ -515,6 +522,7 @@ bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
   TII = MF.getSubtarget().getInstrInfo();
   LS = &getAnalysis<LiveStacks>();
   MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+  Indexes = &getAnalysis<SlotIndexes>();
 
   bool Changed = false;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index dbbfe34a63863..728cf4fe0281a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1406,6 +1406,9 @@ bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
   // since FastRegAlloc does the replacements itself.
   addPass(createVirtRegRewriter(false));
 
+  // Optimizes SGPR spills into VGPR lanes for non-interferring spill-ranges.
+  addPass(&StackSlotColoringID);
+
   // Equivalent of PEI for SGPRs.
   addPass(&SILowerSGPRSpillsID);
   addPass(&SIPreAllocateWWMRegsID);
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index b6a0152f6fa83..9e121b47ad3fb 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -52,7 +52,8 @@ class SILowerSGPRSpills : public MachineFunctionPass {
   void calculateSaveRestoreBlocks(MachineFunction &MF);
   bool spillCalleeSavedRegs(MachineFunction &MF,
                             SmallVectorImpl<int> &CalleeSavedFIs);
-  void extendWWMVirtRegLiveness(MachineFunction &MF, LiveIntervals *LIS);
+  void extendWWMVirtRegLiveness(MachineFunction &MF, SlotIndexes *Indexes,
+                                LiveIntervals *LIS);
 
   bool runOnMachineFunction(MachineFunction &MF) override;
 
@@ -260,6 +261,7 @@ bool SILowerSGPRSpills::spillCalleeSavedRegs(
 }
 
 void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
+                                                 SlotIndexes *Indexes,
                                                  LiveIntervals *LIS) {
   // TODO: This is a workaround to avoid the unmodelled liveness computed with
   // whole-wave virtual registers when allocated together with the regular VGPR
@@ -278,14 +280,21 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
   for (auto Reg : MFI->getSGPRSpillVGPRs()) {
     for (MachineBasicBlock *SaveBlock : SaveBlocks) {
       MachineBasicBlock::iterator InsertBefore = SaveBlock->begin();
+      MachineInstrSpan MIS(InsertBefore, SaveBlock);
+
       DebugLoc DL = SaveBlock->findDebugLoc(InsertBefore);
       auto MIB = BuildMI(*SaveBlock, InsertBefore, DL,
                          TII->get(AMDGPU::IMPLICIT_DEF), Reg);
       MFI->setFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
       // Set SGPR_SPILL asm printer flag
       MIB->setAsmPrinterFlag(AMDGPU::SGPR_SPILL);
+
       if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        assert(std::distance(MIS.begin(), InsertBefore) == 1);
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
       }
     }
   }
@@ -300,8 +309,13 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
       auto MIB = BuildMI(*RestoreBlock, InsertBefore, DL,
                          TII->get(TargetOpcode::KILL));
       MIB.addReg(Reg);
-      if (LIS)
+
+      if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
+      }
     }
   }
 }
@@ -392,7 +406,7 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
     }
 
     if (SpilledToVirtVGPRLanes) {
-      extendWWMVirtRegLiveness(MF, LIS);
+      extendWWMVirtRegLiveness(MF, Indexes, LIS);
       if (LIS) {
         // Compute the LiveInterval for the newly created virtual registers.
         for (auto Reg : FuncInfo->getSGPRSpillVGPRs())
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index ddb5f71935685..80a720fbed27a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1775,8 +1775,10 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int Index,
 
   if (SpillToVGPR) {
 
-    assert(SB.NumSubRegs == VGPRSpills.size() &&
-           "Num of VGPR lanes should be equal to num of SGPRs spilled");
+    assert(SB.NumSubRegs <= VGPRSpills.size() &&
+           "Num of VGPR lanes should be greater or equal to num of SGPRs "
+           "spilled, as Stack Slot Coloring pass assigns different SGPR spills "
+           "into same stack slots");
 
     for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
       Register SubReg =

>From e83b6b0de27e50aab8f5fb5cd96ade9639569ebb Mon Sep 17 00:00:00 2001
From: vg0204 <Vikash.Gupta at amd.com>
Date: Tue, 28 May 2024 11:57:29 +0530
Subject: [PATCH 3/5] Added basic test cases in which stack slots are shared,
 accounting for both equal-sized and unequal-sized objects sharing the same
 stack slot (with size and alignment corresponding to the largest stack object).

---
 llvm/lib/CodeGen/StackSlotColoring.cpp        |  13 +-
 ...er-sgpr-alloc-equal-size-stack-objects.mir | 127 ++++++++++++++++++
 ...gpr-alloc-unequal-size-stack-objects-2.mir | 122 +++++++++++++++++
 ...-sgpr-alloc-unequal-size-stack-objects.mir | 123 +++++++++++++++++
 4 files changed, 382 insertions(+), 3 deletions(-)
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir

diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index c3e95ed9a3909..d655fc4e08ea5 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -13,6 +13,7 @@
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveDebugVariables.h"
 #include "llvm/CodeGen/LiveInterval.h"
 #include "llvm/CodeGen/LiveIntervalUnion.h"
 #include "llvm/CodeGen/LiveIntervals.h"
@@ -153,6 +154,13 @@ namespace {
       AU.addRequired<MachineBlockFrequencyInfo>();
       AU.addPreserved<MachineBlockFrequencyInfo>();
       AU.addPreservedID(MachineDominatorsID);
+
+      /// NOTE: As in AMDGPU pass pipeline, reg alloc is spillted into 2 phases and StackSlotColoring is invoked 
+      /// after each phase, it becomes important to preserve additional analyses result to be used by VGPR regAlloc, 
+      /// after being done with SGPR regAlloc and its related passes. 
+      AU.addPreserved<LiveIntervals>();
+      AU.addPreserved<LiveDebugVariables>();
+
       MachineFunctionPass::getAnalysisUsage(AU);
     }
 
@@ -497,10 +505,9 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     ++I;
   }
 
-  /// BUG: As this pass preserves SlotIndexesAnalysis result, any
+  /// FIXED: As this pass preserves SlotIndexesAnalysis result, any
   /// addition/removal of MI needs corresponding update in SlotIndexAnalysis,
-  /// not done yet. FIXED: Added needed changes to ensure any pass after this
-  /// pass using SLotIndexAnalysis result get correct SlotIndexEntries.
+  /// to avoid corruption of SlotIndexesAnalysis result.
   for (MachineInstr *MI : toErase) {
     MI->eraseFromParent();
     Indexes->removeMachineInstrFromMaps(*MI);
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
new file mode 100755
index 0000000000000..e8651cd6944d1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
@@ -0,0 +1,127 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2, implicit-def $sgpr0_sgpr1, implicit $sgpr0_sgpr1
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr1, 2, killed $vgpr2, implicit $sgpr0_sgpr1
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr0_sgpr1
+    ; SHARE-NEXT: $sgpr1 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 2
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2, implicit-def $sgpr0_sgpr1, implicit $sgpr0_sgpr1
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr1, 2, killed $vgpr2, implicit $sgpr0_sgpr1
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 5, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr0_sgpr1
+    ; NOSHARE-NEXT: $sgpr1 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 6, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 7, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 6, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 7
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_64 = COPY $sgpr0_sgpr1
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr0_sgpr1 = COPY %5
+    %6:sreg_64 = COPY $sgpr2_sgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2_sgpr3 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
new file mode 100755
index 0000000000000..f20dee490a83c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
@@ -0,0 +1,122 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 2
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 2, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 3, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 2, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 3
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 4, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 5, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 6, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 3
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 4
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 6
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_32 = COPY $sgpr0
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr0 = COPY %5
+    %6:sreg_64 = COPY $sgpr2_sgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2_sgpr3 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir
new file mode 100755
index 0000000000000..e2f1d3fd0a0af
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir
@@ -0,0 +1,123 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr2, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr2, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 1
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr2, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr2, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 5, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 6, killed $vgpr1
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 6
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_64 = COPY $sgpr2_sgpr3
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr2_sgpr3 = COPY %5
+    %6:sreg_32 = COPY $sgpr2
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...

>From b000d6a28b33b467071fb84a3cf6087c83efc77e Mon Sep 17 00:00:00 2001
From: vg0204 <Vikash.Gupta at amd.com>
Date: Mon, 27 May 2024 17:44:27 +0530
Subject: [PATCH 4/5] Implemented a patch to optimize SGPR spills.

Introduced the StackSlotColoring pass after SGPR register allocation and spilling to optimize stack slot reuse. In the process, found and resolved a StackSlotColoring bug that was preventing this before. Tested it on a few basic test cases; tests are yet to be added to AMDGPU's test folder.
---
 llvm/lib/CodeGen/StackSlotColoring.cpp        | 10 +++++++++-
 .../lib/Target/AMDGPU/AMDGPUTargetMachine.cpp |  3 +++
 llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp  | 20 ++++++++++++++++---
 llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp     |  6 ++++--
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index 9fdc8a338b52a..c3e95ed9a3909 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -64,6 +64,7 @@ namespace {
     MachineFrameInfo *MFI = nullptr;
     const TargetInstrInfo *TII = nullptr;
     const MachineBlockFrequencyInfo *MBFI = nullptr;
+    SlotIndexes *Indexes = nullptr;
 
     // SSIntervals - Spill slot intervals.
     std::vector<LiveInterval*> SSIntervals;
@@ -496,8 +497,14 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     ++I;
   }
 
-  for (MachineInstr *MI : toErase)
+  /// BUG: As this pass preserves SlotIndexesAnalysis result, any
+  /// addition/removal of MI needs corresponding update in SlotIndexAnalysis,
+  /// not done yet. FIXED: Added needed changes to ensure any pass after this
+  /// pass using SLotIndexAnalysis result get correct SlotIndexEntries.
+  for (MachineInstr *MI : toErase) {
     MI->eraseFromParent();
+    Indexes->removeMachineInstrFromMaps(*MI);
+  }
 
   return changed;
 }
@@ -515,6 +522,7 @@ bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) {
   TII = MF.getSubtarget().getInstrInfo();
   LS = &getAnalysis<LiveStacks>();
   MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
+  Indexes = &getAnalysis<SlotIndexes>();
 
   bool Changed = false;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index dbbfe34a63863..728cf4fe0281a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -1406,6 +1406,9 @@ bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
   // since FastRegAlloc does the replacements itself.
   addPass(createVirtRegRewriter(false));
 
+  // Optimizes SGPR spills into VGPR lanes for non-interfering spill ranges.
+  addPass(&StackSlotColoringID);
+
   // Equivalent of PEI for SGPRs.
   addPass(&SILowerSGPRSpillsID);
   addPass(&SIPreAllocateWWMRegsID);
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index b6a0152f6fa83..9e121b47ad3fb 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -52,7 +52,8 @@ class SILowerSGPRSpills : public MachineFunctionPass {
   void calculateSaveRestoreBlocks(MachineFunction &MF);
   bool spillCalleeSavedRegs(MachineFunction &MF,
                             SmallVectorImpl<int> &CalleeSavedFIs);
-  void extendWWMVirtRegLiveness(MachineFunction &MF, LiveIntervals *LIS);
+  void extendWWMVirtRegLiveness(MachineFunction &MF, SlotIndexes *Indexes,
+                                LiveIntervals *LIS);
 
   bool runOnMachineFunction(MachineFunction &MF) override;
 
@@ -260,6 +261,7 @@ bool SILowerSGPRSpills::spillCalleeSavedRegs(
 }
 
 void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
+                                                 SlotIndexes *Indexes,
                                                  LiveIntervals *LIS) {
   // TODO: This is a workaround to avoid the unmodelled liveness computed with
   // whole-wave virtual registers when allocated together with the regular VGPR
@@ -278,14 +280,21 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
   for (auto Reg : MFI->getSGPRSpillVGPRs()) {
     for (MachineBasicBlock *SaveBlock : SaveBlocks) {
       MachineBasicBlock::iterator InsertBefore = SaveBlock->begin();
+      MachineInstrSpan MIS(InsertBefore, SaveBlock);
+
       DebugLoc DL = SaveBlock->findDebugLoc(InsertBefore);
       auto MIB = BuildMI(*SaveBlock, InsertBefore, DL,
                          TII->get(AMDGPU::IMPLICIT_DEF), Reg);
       MFI->setFlag(Reg, AMDGPU::VirtRegFlag::WWM_REG);
       // Set SGPR_SPILL asm printer flag
       MIB->setAsmPrinterFlag(AMDGPU::SGPR_SPILL);
+
       if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        assert(std::distance(MIS.begin(), InsertBefore) == 1);
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
       }
     }
   }
@@ -300,8 +309,13 @@ void SILowerSGPRSpills::extendWWMVirtRegLiveness(MachineFunction &MF,
       auto MIB = BuildMI(*RestoreBlock, InsertBefore, DL,
                          TII->get(TargetOpcode::KILL));
       MIB.addReg(Reg);
-      if (LIS)
+
+      if (LIS) {
         LIS->InsertMachineInstrInMaps(*MIB);
+      } else if (Indexes) {
+        MachineInstr &Inst = *std::prev(InsertBefore);
+        Indexes->insertMachineInstrInMaps(Inst);
+      }
     }
   }
 }
@@ -392,7 +406,7 @@ bool SILowerSGPRSpills::runOnMachineFunction(MachineFunction &MF) {
     }
 
     if (SpilledToVirtVGPRLanes) {
-      extendWWMVirtRegLiveness(MF, LIS);
+      extendWWMVirtRegLiveness(MF, Indexes, LIS);
       if (LIS) {
         // Compute the LiveInterval for the newly created virtual registers.
         for (auto Reg : FuncInfo->getSGPRSpillVGPRs())
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index ddb5f71935685..80a720fbed27a 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -1775,8 +1775,10 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int Index,
 
   if (SpillToVGPR) {
 
-    assert(SB.NumSubRegs == VGPRSpills.size() &&
-           "Num of VGPR lanes should be equal to num of SGPRs spilled");
+    assert(SB.NumSubRegs <= VGPRSpills.size() &&
+           "Num of VGPR lanes should be greater or equal to num of SGPRs "
+           "spilled, as Stack Slot Coloring pass assigns different SGPR spills "
+           "into same stack slots");
 
     for (unsigned i = 0, e = SB.NumSubRegs; i < e; ++i) {
       Register SubReg =

>From d3c8c421427a6d89b6d16512b9fd4ba5136ff582 Mon Sep 17 00:00:00 2001
From: vg0204 <Vikash.Gupta at amd.com>
Date: Tue, 28 May 2024 11:57:29 +0530
Subject: [PATCH 5/5] Added basic test cases in which stack slots are shared,
 accounting for both equal-sized and unequal-sized objects sharing the same
 stack slot (with size and alignment corresponding to the largest stack object).

---
 llvm/lib/CodeGen/StackSlotColoring.cpp        |  13 +-
 ...er-sgpr-alloc-equal-size-stack-objects.mir | 127 ++++++++++++++++++
 ...gpr-alloc-unequal-size-stack-objects-2.mir | 122 +++++++++++++++++
 ...-sgpr-alloc-unequal-size-stack-objects.mir | 123 +++++++++++++++++
 4 files changed, 382 insertions(+), 3 deletions(-)
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
 create mode 100755 llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir

diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index c3e95ed9a3909..d655fc4e08ea5 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -13,6 +13,7 @@
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveDebugVariables.h"
 #include "llvm/CodeGen/LiveInterval.h"
 #include "llvm/CodeGen/LiveIntervalUnion.h"
 #include "llvm/CodeGen/LiveIntervals.h"
@@ -153,6 +154,13 @@ namespace {
       AU.addRequired<MachineBlockFrequencyInfo>();
       AU.addPreserved<MachineBlockFrequencyInfo>();
       AU.addPreservedID(MachineDominatorsID);
+
+      /// NOTE: As the AMDGPU pass pipeline splits register allocation into two phases
+      /// and StackSlotColoring is invoked after each phase, it is important to preserve
+      /// these additional analysis results so that the VGPR regalloc phase, which runs
+      /// after SGPR regalloc and its related passes, can use them.
+      AU.addPreserved<LiveIntervals>();
+      AU.addPreserved<LiveDebugVariables>();
+
       MachineFunctionPass::getAnalysisUsage(AU);
     }
 
@@ -497,10 +505,9 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
     ++I;
   }
 
-  /// BUG: As this pass preserves SlotIndexesAnalysis result, any
+  /// FIXED: As this pass preserves the SlotIndexes analysis result, any
   /// addition/removal of MI needs corresponding update in SlotIndexAnalysis,
-  /// not done yet. FIXED: Added needed changes to ensure any pass after this
-  /// pass using SLotIndexAnalysis result get correct SlotIndexEntries.
+  /// to avoid corrupting the SlotIndexes analysis result.
   for (MachineInstr *MI : toErase) {
     MI->eraseFromParent();
     Indexes->removeMachineInstrFromMaps(*MI);
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
new file mode 100755
index 0000000000000..e8651cd6944d1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-equal-size-stack-objects.mir
@@ -0,0 +1,127 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2, implicit-def $sgpr0_sgpr1, implicit $sgpr0_sgpr1
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr1, 2, killed $vgpr2, implicit $sgpr0_sgpr1
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr0_sgpr1
+    ; SHARE-NEXT: $sgpr1 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 2
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2, implicit-def $sgpr0_sgpr1, implicit $sgpr0_sgpr1
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr1, 2, killed $vgpr2, implicit $sgpr0_sgpr1
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 5, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr0_sgpr1
+    ; NOSHARE-NEXT: $sgpr1 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 6, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 7, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 6, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 7
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_64 = COPY $sgpr0_sgpr1
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr0_sgpr1 = COPY %5
+    %6:sreg_64 = COPY $sgpr2_sgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2_sgpr3 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
new file mode 100755
index 0000000000000..f20dee490a83c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects-2.mir
@@ -0,0 +1,122 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 2
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr0, 1, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 2, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 3, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 2, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 3
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 4, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 5, killed $vgpr1, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr3, 6, killed $vgpr1, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 3
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 4
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 6
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_32 = COPY $sgpr0
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr0 = COPY %5
+    %6:sreg_64 = COPY $sgpr2_sgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2_sgpr3 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir
new file mode 100755
index 0000000000000..e2f1d3fd0a0af
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-after-sgpr-alloc-unequal-size-stack-objects.mir
@@ -0,0 +1,123 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -o - %s | FileCheck -check-prefix=SHARE %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -stress-regalloc=3 -start-before=greedy -stop-before=prologepilog -no-stack-slot-sharing -o - %s | FileCheck -check-prefix=NOSHARE %s
+
+--- |
+
+  define void @stack-slot-color-after-sgpr-alloc(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) {
+  bb:
+    %tmp = load i32, ptr addrspace(1) null, align 4
+    call void @func(i32 undef)
+    call void @func(i32 %tmp)
+    unreachable
+  }
+
+  declare void @func(i32)
+...
+
+
+---
+name:            stack-slot-color-after-sgpr-alloc
+tracksRegLiveness: true
+frameInfo:
+  adjustsStack:    true
+  hasCalls:        true
+machineFunctionInfo:
+  scratchRSrcReg: $sgpr0_sgpr1_sgpr2_sgpr3
+  frameOffsetReg: $sgpr32
+  stackPtrOffsetReg: $sgpr32
+body:             |
+  bb.0:
+    ; SHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; SHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; SHARE-NEXT: {{  $}}
+    ; SHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; SHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr2, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr2, implicit $sgpr2_sgpr3
+    ; SHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; SHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; SHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; SHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; SHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr2_sgpr3
+    ; SHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; SHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr1
+    ; SHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; SHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; SHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; SHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; SHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; SHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; SHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 0
+    ; SHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 1
+    ; SHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ;
+    ; NOSHARE-LABEL: name: stack-slot-color-after-sgpr-alloc
+    ; NOSHARE: liveins: $sgpr30, $sgpr31, $vgpr63
+    ; NOSHARE-NEXT: {{  $}}
+    ; NOSHARE-NEXT: renamable $vgpr2 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr30, 0, $vgpr63
+    ; NOSHARE-NEXT: $vgpr63 = SI_SPILL_S32_TO_VGPR killed $sgpr31, 1, $vgpr63
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr32, 0, killed $vgpr2
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr2, 1, killed $vgpr2, implicit-def $sgpr2_sgpr3, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr3, 2, killed $vgpr2, implicit $sgpr2_sgpr3
+    ; NOSHARE-NEXT: renamable $vgpr0_vgpr1 = IMPLICIT_DEF
+    ; NOSHARE-NEXT: renamable $vgpr0 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+    ; NOSHARE-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.9, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR $sgpr4, 3, killed $vgpr2, implicit-def $sgpr4_sgpr5, implicit $sgpr4_sgpr5
+    ; NOSHARE-NEXT: renamable $vgpr2 = SI_SPILL_S32_TO_VGPR killed $sgpr5, 4, killed $vgpr2, implicit killed $sgpr4_sgpr5
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr2, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr2, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit undef $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 0
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr32, 5, killed $vgpr1
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 1, implicit-def $sgpr2_sgpr3
+    ; NOSHARE-NEXT: $sgpr3 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 2
+    ; NOSHARE-NEXT: renamable $vgpr1 = SI_SPILL_S32_TO_VGPR $sgpr2, 6, killed $vgpr1
+    ; NOSHARE-NEXT: SI_SPILL_WWM_V32_SAVE $vgpr1, %stack.8, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ; NOSHARE-NEXT: $vgpr0 = SI_SPILL_V32_RESTORE %stack.9, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.9, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr1, 3, implicit-def $sgpr4_sgpr5
+    ; NOSHARE-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR killed $vgpr1, 4
+    ; NOSHARE-NEXT: dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @func, csr_amdgpu, implicit $vgpr0
+    ; NOSHARE-NEXT: renamable $vgpr0 = SI_SPILL_WWM_V32_RESTORE %stack.8, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.8, addrspace 5)
+    ; NOSHARE-NEXT: $sgpr32 = SI_RESTORE_S32_FROM_VGPR $vgpr0, 5
+    ; NOSHARE-NEXT: $sgpr2 = SI_RESTORE_S32_FROM_VGPR killed $vgpr0, 6
+    ; NOSHARE-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    %0:sreg_32_xm0 = COPY $sgpr32
+    %5:sreg_64 = COPY $sgpr2_sgpr3
+    %1:vreg_64 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_LOAD_DWORD %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit undef $vgpr0
+    $sgpr32 = COPY %0
+    %4:sreg_32_xm0 = COPY $sgpr32
+    $sgpr2_sgpr3 = COPY %5
+    %6:sreg_32 = COPY $sgpr2
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    ADJCALLSTACKUP 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+    $vgpr0 = COPY %2
+    dead $sgpr30_sgpr31 = SI_CALL %3, @func, csr_amdgpu, implicit killed $vgpr0
+    $sgpr32 = COPY %4
+    $sgpr2 = COPY %6
+    ADJCALLSTACKDOWN 0, 0, implicit-def $scc, implicit-def $sgpr32, implicit $sgpr32, implicit $sgpr32
+...



More information about the llvm-commits mailing list