[llvm] f2e8e2f - [AMDGPU] Make chain functions receive a stack pointer (#184616)

via llvm-commits llvm-commits@lists.llvm.org
Fri Mar 6 02:01:48 PST 2026


Author: Diana Picus
Date: 2026-03-06T11:01:42+01:00
New Revision: f2e8e2faffb0a1788025dec11c58c015a34757eb

URL: https://github.com/llvm/llvm-project/commit/f2e8e2faffb0a1788025dec11c58c015a34757eb
DIFF: https://github.com/llvm/llvm-project/commit/f2e8e2faffb0a1788025dec11c58c015a34757eb.diff

LOG: [AMDGPU] Make chain functions receive a stack pointer (#184616)

Currently, chain functions are free to set up a stack pointer if they
need one, and they assume they can start at scratch offset 0. This is
not correct if CWSR and dynamic VGPRs are both enabled, since in that
case we need to reserve an area at offset 0 for the trap handler, but
only when running on a compute queue (which we determine at runtime).
Rather than duplicate in every chain function the code sequence for
determining if/how much scratch space needs to be reserved, this patch
changes the ABI of chain functions so that they receive a stack pointer
from their caller.

Since chain functions can no longer use plain offsets to access their
own stack, we'll also need to allocate a frame pointer more often (and
sometimes also a base pointer). For simplicity, we use the same
registers that `amdgpu_gfx` functions do (s32, s33, s34). This may
change in the future. Chain functions never return to their caller and
thus don't need to preserve the frame or base pointer.

Another consequence is that now we might need to realign the stack in
some cases (since it no longer starts at the infinitely aligned 0).

Added: 
    

Modified: 
    llvm/docs/AMDGPUUsage.rst
    llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
    llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
    llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
    llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
    llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
    llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.set.inactive.chain.arg.ll
    llvm/test/CodeGen/AMDGPU/llvm.sponentry.ll
    llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain-preserve.mir
    llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain.mir

Removed: 
    


################################################################################
diff  --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 70523faccc395..036b4461ec06d 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -2252,10 +2252,10 @@ The AMDGPU backend supports the following calling conventions:
                                      case the backend assumes that there are no inactive lanes upon entry; any inactive
                                      lanes that need to be preserved must be explicitly present in the IR).
 
-                                     Wave scratch is "empty" at function boundaries. There is no stack pointer input
-                                     or output value, but functions are free to use scratch starting from an initial
-                                     stack pointer. Calls to ``amdgpu_gfx`` functions are allowed and behave like they
-                                     do in ``amdgpu_cs`` functions.
+                                     Chain functions receive a stack pointer from their caller (in s32), similar to
+                                     ``amdgpu_gfx`` functions. If needed, the frame pointer is s33 and the base pointer
+                                     is s34. Calls to ``amdgpu_gfx`` functions are allowed and behave like they do in
+                                     ``amdgpu_cs`` functions.
 
                                      A function may have multiple exits (e.g. one chain exit and one plain ``ret void``
                                      for when the wave ends), but all ``llvm.amdgcn.cs.chain`` exits must be in

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
index 1317210a445d2..5e5ad3dbd713a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -101,9 +101,7 @@ class AMDGPUMachineFunction : public MachineFunctionInfo {
   bool isChainFunction() const { return IsChainFunction; }
 
   // The stack is empty upon entry to this function.
-  bool isBottomOfStack() const {
-    return isEntryFunction() || isChainFunction();
-  }
+  bool isBottomOfStack() const { return isEntryFunction(); }
 
   bool isMemoryBound() const {
     return MemoryBound;

diff  --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 4a62af56fd8e5..9ee9e63fdab26 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1216,33 +1216,24 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
   // to determine the end of the prologue.
   DebugLoc DL;
 
-  if (FuncInfo->isChainFunction()) {
-    // Functions with the amdgpu_cs_chain[_preserve] CC don't receive a SP, but
-    // are free to set one up if they need it.
-    bool UseSP = requiresStackPointerReference(MF);
-    if (UseSP) {
-      assert(StackPtrReg != AMDGPU::SP_REG);
-
-      BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_MOV_B32), StackPtrReg)
-          .addImm(MFI.getStackSize() * getScratchScaleFactor(ST));
-    }
-  }
-
   bool HasFP = false;
   bool HasBP = false;
   uint32_t NumBytes = MFI.getStackSize();
   uint32_t RoundedSize = NumBytes;
 
+  // Chain functions never return, so there's no need to save and restore the FP
+  // or BP.
+  bool SavesStackRegs = !FuncInfo->isChainFunction();
+
   if (TRI.hasStackRealignment(MF))
     HasFP = true;
 
   Register FramePtrRegScratchCopy;
   if (!HasFP && !hasFP(MF)) {
     // Emit the CSR spill stores with SP base register.
-    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits,
-                       FuncInfo->isChainFunction() ? Register() : StackPtrReg,
+    emitCSRSpillStores(MF, MBB, MBBI, DL, LiveUnits, StackPtrReg,
                        FramePtrRegScratchCopy);
-  } else {
+  } else if (SavesStackRegs) {
     // CSR spill stores will use FP as base register.
     Register SGPRForFPSaveRestoreCopy =
         FuncInfo->getScratchSGPRCopyDstReg(FramePtrReg);
@@ -1327,17 +1318,17 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
 
   bool FPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(FramePtrReg);
   (void)FPSaved;
-  assert((!HasFP || FPSaved) &&
+  assert((!HasFP || FPSaved || !SavesStackRegs) &&
          "Needed to save FP but didn't save it anywhere");
 
   // If we allow spilling to AGPRs we may have saved FP but then spill
   // everything into AGPRs instead of the stack.
-  assert((HasFP || !FPSaved || EnableSpillVGPRToAGPR) &&
+  assert((HasFP || !FPSaved || !SavesStackRegs || EnableSpillVGPRToAGPR) &&
          "Saved FP but didn't need it");
 
   bool BPSaved = FuncInfo->hasPrologEpilogSGPRSpillEntry(BasePtrReg);
   (void)BPSaved;
-  assert((!HasBP || BPSaved) &&
+  assert((!HasBP || BPSaved || !SavesStackRegs) &&
          "Needed to save BP but didn't save it anywhere");
 
   assert((HasBP || !BPSaved) && "Saved BP but didn't need it");
@@ -1354,6 +1345,10 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
   if (FuncInfo->isEntryFunction())
     return;
 
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  if (FuncInfo->isChainFunction() && !MFI.hasTailCall())
+    return;
+
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   const SIInstrInfo *TII = ST.getInstrInfo();
   const SIRegisterInfo &TRI = TII->getRegisterInfo();
@@ -1371,7 +1366,6 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
     MBBI = MBB.getFirstTerminator();
   }
 
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
   uint32_t NumBytes = MFI.getStackSize();
   uint32_t RoundedSize = FuncInfo->isStackRealigned()
                              ? NumBytes + MFI.getMaxAlign().value()
@@ -1427,8 +1421,7 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
       MIB.setMIFlag(MachineInstr::FrameDestroy);
   } else {
     // Insert the CSR spill restores with SP as the base register.
-    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits,
-                         FuncInfo->isChainFunction() ? Register() : StackPtrReg,
+    emitCSRSpillRestores(MF, MBB, MBBI, DL, LiveUnits, StackPtrReg,
                          FramePtrRegScratchCopy);
   }
 }
@@ -1656,6 +1649,11 @@ void SIFrameLowering::determinePrologEpilogSGPRSaves(
     MFI->setSGPRForEXECCopy(AMDGPU::NoRegister);
   }
 
+  // Chain functions don't return to the caller, so they don't need to preserve
+  // the FP and BP.
+  if (MFI->isChainFunction())
+    return;
+
   // hasFP only knows about stack objects that already exist. We're now
   // determining the stack slots that will be created, so we have to predict
   // them. Stack objects force FP usage with calls.
@@ -2154,11 +2152,10 @@ static bool frameTriviallyRequiresSP(const MachineFrameInfo &MFI) {
 bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
   const MachineFrameInfo &MFI = MF.getFrameInfo();
 
-  // For entry & chain functions we can use an immediate offset in most cases,
+  // For entry functions we can use an immediate offset in most cases,
   // so the presence of calls doesn't imply we need a distinct frame pointer.
   if (MFI.hasCalls() &&
-      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
-      !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) {
+      !MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
     // All offsets are unsigned, so need to be addressed in the same direction
     // as stack growth.
 
@@ -2167,9 +2164,7 @@ bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
     return MFI.getStackSize() != 0;
   }
 
-  return (frameTriviallyRequiresSP(MFI) &&
-          !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) ||
-         MFI.isFrameAddressTaken() ||
+  return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
          MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
              MF) ||
          mayReserveScratchForCWSR(MF) ||
@@ -2188,21 +2183,18 @@ bool SIFrameLowering::mayReserveScratchForCWSR(
 // register. We may need to initialize the stack pointer depending on the frame
 // properties, which logically overlaps many of the cases where an ordinary
 // function would require an FP.
-// Also used for chain functions. While not technically entry functions, chain
-// functions may need to set up a stack pointer in some situations.
 bool SIFrameLowering::requiresStackPointerReference(
     const MachineFunction &MF) const {
   // Callable functions always require a stack pointer reference.
-  assert((MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() ||
-          MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) &&
-         "only expected to call this for entry points and chain functions");
+  assert(MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction() &&
+         "only expected to call this for entry points functions");
 
   const MachineFrameInfo &MFI = MF.getFrameInfo();
 
   // Entry points ordinarily don't need to initialize SP. We have to set it up
-  // for callees if there are any. Also note tail calls are impossible/don't
-  // make any sense for kernels.
-  if (MFI.hasCalls())
+  // for callees if there are any. Also note tail calls are only possible via
+  // the `llvm.amdgcn.cs.chain` intrinsic.
+  if (MFI.hasCalls() || MFI.hasTailCall())
     return true;
 
   // We still need to initialize the SP if we're doing anything weird that

diff  --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 25a28ec471913..32efae69b20c8 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -93,20 +93,7 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
     MinNumAGPRs = MinNumAGPRAttr;
   }
 
-  if (AMDGPU::isChainCC(CC)) {
-    // Chain functions don't receive an SP from their caller, but are free to
-    // set one up. For now, we can use s32 to match what amdgpu_gfx functions
-    // would use if called, but this can be revisited.
-    // FIXME: Only reserve this if we actually need it.
-    StackPtrOffsetReg = AMDGPU::SGPR32;
-
-    ScratchRSrcReg = AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51;
-
-    ArgInfo.PrivateSegmentBuffer =
-        ArgDescriptor::createRegister(ScratchRSrcReg);
-
-    ImplicitArgPtr = false;
-  } else if (!isEntryFunction()) {
+  if (!isEntryFunction()) {
     if (CC != CallingConv::AMDGPU_Gfx &&
         CC != CallingConv::AMDGPU_Gfx_WholeWave)
       ArgInfo = AMDGPUFunctionArgInfo::FixedABIFunctionInfo;
@@ -117,13 +104,16 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
     if (!ST.hasFlatScratchEnabled()) {
       // Non-entry functions have no special inputs for now, other registers
       // required for scratch access.
-      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
+      ScratchRSrcReg = AMDGPU::isChainCC(CC)
+                           ? AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51
+                           : ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
 
       ArgInfo.PrivateSegmentBuffer =
         ArgDescriptor::createRegister(ScratchRSrcReg);
     }
 
-    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
+    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr") &&
+        !AMDGPU::isChainCC(CC))
       ImplicitArgPtr = true;
   } else {
     ImplicitArgPtr = false;

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
index 2889f37a65d97..89d5d3dc18ea8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-cc.ll
@@ -39,7 +39,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_simple_call(<4 x i32> inreg %sgpr,
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, use at abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, use at abs32@hi
-; GISEL-GFX11-NEXT:    s_mov_b32 s32, 0
+; GISEL-GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GISEL-GFX11-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GISEL-GFX11-NEXT:    s_endpgm
 ;
@@ -58,7 +58,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_simple_call(<4 x i32> inreg %sgpr,
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, use at abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, use at abs32@hi
 ; GISEL-GFX10-NEXT:    s_mov_b64 s[2:3], s[50:51]
-; GISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GISEL-GFX10-NEXT:    s_endpgm
 ;
@@ -71,7 +70,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_simple_call(<4 x i32> inreg %sgpr,
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, use at abs32@hi
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, use at abs32@lo
-; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 0
+; DAGISEL-GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; DAGISEL-GFX11-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; DAGISEL-GFX11-NEXT:    s_endpgm
 ;
@@ -90,7 +89,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_simple_call(<4 x i32> inreg %sgpr,
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, use at abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, use at abs32@lo
 ; DAGISEL-GFX10-NEXT:    s_mov_b64 s[2:3], s[50:51]
-; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; DAGISEL-GFX10-NEXT:    s_endpgm
   call amdgpu_gfx void @use(<4 x i32> %sgpr, <4 x i32> %vgpr)
@@ -101,8 +99,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; GISEL-GFX11-LABEL: amdgpu_cs_chain_spill:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    s_mov_b32 s32, 0
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v32, v8 :: v_dual_mov_b32 v33, v9
 ; GISEL-GFX11-NEXT:    s_add_u32 s24, s32, 4
 ; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32
 ; GISEL-GFX11-NEXT:    scratch_store_b32 off, v17, s24
@@ -123,6 +119,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; GISEL-GFX11-NEXT:    scratch_store_b32 off, v24, s24
 ; GISEL-GFX11-NEXT:    scratch_store_b32 off, v25, s25
 ; GISEL-GFX11-NEXT:    s_add_u32 s24, s32, 40
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v32, v8 :: v_dual_mov_b32 v33, v9
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v34, v10 :: v_dual_mov_b32 v35, v11
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v36, v12 :: v_dual_mov_b32 v37, v13
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v38, v14 :: v_dual_mov_b32 v39, v15
@@ -170,7 +167,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v37, v13
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v38, v14
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v39, v15
-; GISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32
 ; GISEL-GFX10-NEXT:    buffer_store_dword v17, off, s[48:51], s32 offset:4
 ; GISEL-GFX10-NEXT:    buffer_store_dword v18, off, s[48:51], s32 offset:8
@@ -229,8 +225,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_spill:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 0
-; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v32, v15 :: v_dual_mov_b32 v33, v14
 ; DAGISEL-GFX11-NEXT:    s_add_i32 s24, s32, 60
 ; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32
 ; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v31, s24
@@ -251,6 +245,7 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v24, s24
 ; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v23, s25
 ; DAGISEL-GFX11-NEXT:    s_add_i32 s24, s32, 24
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v32, v15 :: v_dual_mov_b32 v33, v14
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v34, v13 :: v_dual_mov_b32 v35, v12
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v36, v11 :: v_dual_mov_b32 v37, v10
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v38, v9 :: v_dual_mov_b32 v39, v8
@@ -298,7 +293,6 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_spill(<24 x i32> inreg %sgprs, <24
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v37, v10
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v38, v9
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v39, v8
-; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32
 ; DAGISEL-GFX10-NEXT:    buffer_store_dword v17, off, s[48:51], s32 offset:4
 ; DAGISEL-GFX10-NEXT:    buffer_store_dword v18, off, s[48:51], s32 offset:8
@@ -362,11 +356,12 @@ define amdgpu_cs_chain void @alloca_and_call() {
 ; GISEL-GFX11:       ; %bb.0: ; %.entry
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 42
+; GISEL-GFX11-NEXT:    s_mov_b32 s33, s32
 ; GISEL-GFX11-NEXT:    s_mov_b32 s0, use at abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s1, use at abs32@hi
-; GISEL-GFX11-NEXT:    s_mov_b32 s32, 16
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v0, off
-; GISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 0
+; GISEL-GFX11-NEXT:    s_add_i32 s32, s32, 16
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v0, s33
+; GISEL-GFX11-NEXT:    v_mov_b32_e32 v0, s33
 ; GISEL-GFX11-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GISEL-GFX11-NEXT:    s_endpgm
 ;
@@ -374,13 +369,14 @@ define amdgpu_cs_chain void @alloca_and_call() {
 ; GISEL-GFX10:       ; %bb.0: ; %.entry
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 42
+; GISEL-GFX10-NEXT:    s_mov_b32 s33, s32
 ; GISEL-GFX10-NEXT:    s_mov_b64 s[0:1], s[48:49]
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, use at abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, use at abs32@hi
+; GISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], s33
+; GISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
 ; GISEL-GFX10-NEXT:    s_mov_b64 s[2:3], s[50:51]
-; GISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], 0
-; GISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GISEL-GFX10-NEXT:    s_movk_i32 s32, 0x200
+; GISEL-GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GISEL-GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GISEL-GFX10-NEXT:    s_endpgm
 ;
@@ -388,11 +384,12 @@ define amdgpu_cs_chain void @alloca_and_call() {
 ; DAGISEL-GFX11:       ; %bb.0: ; %.entry
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 42
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s33, s32
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s1, use at abs32@hi
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s0, use at abs32@lo
-; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 16
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v0, off
-; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 0
+; DAGISEL-GFX11-NEXT:    s_add_i32 s32, s32, 16
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v0, s33
+; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v0, s33
 ; DAGISEL-GFX11-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; DAGISEL-GFX11-NEXT:    s_endpgm
 ;
@@ -400,13 +397,14 @@ define amdgpu_cs_chain void @alloca_and_call() {
 ; DAGISEL-GFX10:       ; %bb.0: ; %.entry
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 42
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s33, s32
 ; DAGISEL-GFX10-NEXT:    s_mov_b64 s[0:1], s[48:49]
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, use at abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, use at abs32@lo
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], s33
+; DAGISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
 ; DAGISEL-GFX10-NEXT:    s_mov_b64 s[2:3], s[50:51]
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], 0
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; DAGISEL-GFX10-NEXT:    s_movk_i32 s32, 0x200
+; DAGISEL-GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; DAGISEL-GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; DAGISEL-GFX10-NEXT:    s_endpgm
 .entry:
@@ -429,6 +427,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %a, <3 x i32> %b) {
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v1
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
 ; GISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; GISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -442,6 +441,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %a, <3 x i32> %b) {
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v2
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; GISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; GISEL-GFX10-NEXT:    s_add_u32 s100, s100, s3
@@ -469,6 +469,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %a, <3 x i32> %b) {
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -482,6 +483,7 @@ define amdgpu_cs void @cs_to_chain(<3 x i32> inreg %a, <3 x i32> %b) {
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v2
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; DAGISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; DAGISEL-GFX10-NEXT:    s_add_u32 s100, s100, s3
@@ -512,6 +514,7 @@ define amdgpu_cs void @cs_to_chain_nonuniform(<3 x i32> %a, <3 x i32> %b) {
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v10, v5
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; GISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -527,6 +530,7 @@ define amdgpu_cs void @cs_to_chain_nonuniform(<3 x i32> %a, <3 x i32> %b) {
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v5
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; GISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; GISEL-GFX10-NEXT:    s_add_u32 s100, s100, s0
@@ -546,6 +550,7 @@ define amdgpu_cs void @cs_to_chain_nonuniform(<3 x i32> %a, <3 x i32> %b) {
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v10, v5
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -561,6 +566,7 @@ define amdgpu_cs void @cs_to_chain_nonuniform(<3 x i32> %a, <3 x i32> %b) {
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v5
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; DAGISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; DAGISEL-GFX10-NEXT:    s_add_u32 s100, s100, s0
@@ -639,6 +645,56 @@ define amdgpu_cs_chain void @chain_to_chain(<3 x i32> inreg %a, <3 x i32> %b) {
   unreachable
 }
 
+define amdgpu_cs_chain void @chain_to_chain_local(<3 x i32> inreg %a, <3 x i32> %b) {
+; GISEL-GFX11-LABEL: chain_to_chain_local:
+; GISEL-GFX11:       ; %bb.0:
+; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 5
+; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v0, s32 dlc
+; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
+;
+; GISEL-GFX10-LABEL: chain_to_chain_local:
+; GISEL-GFX10:       ; %bb.0:
+; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 5
+; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; GISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], s32
+; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
+;
+; DAGISEL-GFX11-LABEL: chain_to_chain_local:
+; DAGISEL-GFX11:       ; %bb.0:
+; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v0, 5
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v0, s32 dlc
+; DAGISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
+;
+; DAGISEL-GFX10-LABEL: chain_to_chain_local:
+; DAGISEL-GFX10:       ; %bb.0:
+; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 5
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee at abs32@hi
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee at abs32@lo
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v0, off, s[48:51], s32
+; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
+  %v = alloca i32, i32 4, addrspace(5)
+  store volatile i32 5, ptr addrspace(5) %v
+  call void(ptr, i32, <3 x i32>, <3 x i32>, i32, ...) @llvm.amdgcn.cs.chain.v3i32(ptr @chain_callee, i32 -1, <3 x i32> inreg %a, <3 x i32> %b, i32 0)
+  unreachable
+}
+
 define amdgpu_cs_chain void @chain_to_chain_wwm(<3 x i32> inreg %a, <3 x i32> %b) {
 ; GISEL-GFX11-LABEL: chain_to_chain_wwm:
 ; GISEL-GFX11:       ; %bb.0:
@@ -926,8 +982,8 @@ define amdgpu_cs_chain void @chain_to_chain_more_args(<3 x i32> inreg %a, <3 x i
   unreachable
 }
 
-define amdgpu_cs_chain void @amdgpu_cs_chain_dont_realign_stack(i32 %idx) {
-; GISEL-GFX11-LABEL: amdgpu_cs_chain_dont_realign_stack:
+define amdgpu_cs_chain void @amdgpu_cs_chain_realign_stack(i32 %idx) {
+; GISEL-GFX11-LABEL: amdgpu_cs_chain_realign_stack:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-GFX11-NEXT:    s_mov_b32 s3, 4
@@ -935,22 +991,31 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_dont_realign_stack(i32 %idx) {
 ; GISEL-GFX11-NEXT:    s_mov_b32 s1, 2
 ; GISEL-GFX11-NEXT:    s_mov_b32 s0, 1
 ; GISEL-GFX11-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
-; GISEL-GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GISEL-GFX11-NEXT:    v_mov_b32_e32 v4, v0
+; GISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
+; GISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
+; GISEL-GFX11-NEXT:    s_addk_i32 s32, 0xc0
+; GISEL-GFX11-NEXT:    v_add_nc_u32_e32 v4, s33, v0
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
 ; GISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
 ; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GISEL-GFX11-NEXT:    s_endpgm
 ;
-; GISEL-GFX10-LABEL: amdgpu_cs_chain_dont_realign_stack:
+; GISEL-GFX10-LABEL: amdgpu_cs_chain_realign_stack:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
 ; GISEL-GFX10-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
+; GISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, 1
-; GISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 2
+; GISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v2, 5, s33
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 3
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 4
+; GISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1800
+; GISEL-GFX10-NEXT:    v_add_nc_u32_e32 v0, v0, v2
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 2
 ; GISEL-GFX10-NEXT:    buffer_store_dword v1, v0, s[48:51], 0 offen
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GISEL-GFX10-NEXT:    buffer_store_dword v2, v0, s[48:51], 0 offen offset:4
@@ -961,24 +1026,33 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_dont_realign_stack(i32 %idx) {
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GISEL-GFX10-NEXT:    s_endpgm
 ;
-; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_dont_realign_stack:
+; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_realign_stack:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v0, 1 :: v_dual_mov_b32 v1, 2
+; DAGISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v2, 3 :: v_dual_mov_b32 v3, 4
-; DAGISEL-GFX11-NEXT:    v_lshl_add_u32 v4, v8, 4, 0
+; DAGISEL-GFX11-NEXT:    v_lshl_add_u32 v4, v8, 4, s33
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX11-NEXT:    s_addk_i32 s32, 0xc0
 ; DAGISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
 ; DAGISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; DAGISEL-GFX11-NEXT:    s_endpgm
 ;
-; DAGISEL-GFX10-LABEL: amdgpu_cs_chain_dont_realign_stack:
+; DAGISEL-GFX10-LABEL: amdgpu_cs_chain_realign_stack:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 4
-; DAGISEL-GFX10-NEXT:    v_lshl_add_u32 v1, v8, 4, 0
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 3
+; DAGISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 2
+; DAGISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v2, 5, s33
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 1
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1800
+; DAGISEL-GFX10-NEXT:    v_lshl_add_u32 v1, v8, 4, v2
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 3
 ; DAGISEL-GFX10-NEXT:    buffer_store_dword v0, v1, s[48:51], 0 offen offset:12
 ; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; DAGISEL-GFX10-NEXT:    buffer_store_dword v2, v1, s[48:51], 0 offen offset:8
@@ -994,6 +1068,119 @@ define amdgpu_cs_chain void @amdgpu_cs_chain_dont_realign_stack(i32 %idx) {
   ret void
 }
 
+define amdgpu_cs_chain void @amdgpu_cs_chain_realign_stack_chain_call(i32 %idx, <3 x i32> inreg %a, <3 x i32> %b) {
+; GISEL-GFX11-LABEL: amdgpu_cs_chain_realign_stack_chain_call:
+; GISEL-GFX11:       ; %bb.0:
+; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT:    s_mov_b32 s7, 4
+; GISEL-GFX11-NEXT:    s_mov_b32 s6, 3
+; GISEL-GFX11-NEXT:    s_mov_b32 s5, 2
+; GISEL-GFX11-NEXT:    s_mov_b32 s4, 1
+; GISEL-GFX11-NEXT:    v_lshlrev_b32_e32 v4, 4, v8
+; GISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s7
+; GISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GISEL-GFX11-NEXT:    v_add_nc_u32_e32 v4, s33, v4
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v9 :: v_dual_mov_b32 v9, v10
+; GISEL-GFX11-NEXT:    v_mov_b32_e32 v10, v11
+; GISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
+; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
+; GISEL-GFX11-NEXT:    s_addk_i32 s32, 0xc0
+; GISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
+; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX11-NEXT:    s_mov_b32 s32, s34
+; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
+;
+; GISEL-GFX10-LABEL: amdgpu_cs_chain_realign_stack_chain_call:
+; GISEL-GFX10:       ; %bb.0:
+; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
+; GISEL-GFX10-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
+; GISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, 1
+; GISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v2, 5, s33
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v9
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v9, v10
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v11
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 3
+; GISEL-GFX10-NEXT:    v_add_nc_u32_e32 v0, v0, v2
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 2
+; GISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 4
+; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
+; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
+; GISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1800
+; GISEL-GFX10-NEXT:    buffer_store_dword v1, v0, s[48:51], 0 offen
+; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX10-NEXT:    buffer_store_dword v2, v0, s[48:51], 0 offen offset:4
+; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX10-NEXT:    buffer_store_dword v3, v0, s[48:51], 0 offen offset:8
+; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX10-NEXT:    buffer_store_dword v4, v0, s[48:51], 0 offen offset:12
+; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GISEL-GFX10-NEXT:    s_mov_b32 s32, s34
+; GISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
+;
+; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_realign_stack_chain_call:
+; DAGISEL-GFX11:       ; %bb.0:
+; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v0, 1 :: v_dual_mov_b32 v1, 2
+; DAGISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v2, 3 :: v_dual_mov_b32 v3, 4
+; DAGISEL-GFX11-NEXT:    v_lshl_add_u32 v4, v8, 4, s33
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v9 :: v_dual_mov_b32 v9, v10
+; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v10, v11
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
+; DAGISEL-GFX11-NEXT:    s_addk_i32 s32, 0xc0
+; DAGISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
+; DAGISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, s34
+; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
+;
+; DAGISEL-GFX10-LABEL: amdgpu_cs_chain_realign_stack_chain_call:
+; DAGISEL-GFX10:       ; %bb.0:
+; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 3
+; DAGISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 2
+; DAGISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v1, 5, s33
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 1
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
+; DAGISEL-GFX10-NEXT:    v_lshl_add_u32 v0, v8, 4, v1
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v1, 4
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v9
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v9, v10
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v11
+; DAGISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1800
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v1, v0, s[48:51], 0 offen offset:12
+; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v2, v0, s[48:51], 0 offen offset:8
+; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v3, v0, s[48:51], 0 offen offset:4
+; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v4, v0, s[48:51], 0 offen
+; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, s34
+; DAGISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
+  %alloca.align32 = alloca [8 x <4 x i32>], align 32, addrspace(5)
+  %gep0 = getelementptr inbounds [8 x <4 x i32>], ptr addrspace(5) %alloca.align32, i32 0, i32 %idx
+  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %gep0, align 32
+  call void(ptr, i32, <3 x i32>, <3 x i32>, i32, ...) @llvm.amdgcn.cs.chain.v3i32(ptr @chain_callee, i32 -1, <3 x i32> inreg %a, <3 x i32> %b, i32 0)
+  unreachable
+}
+
 declare void @llvm.amdgcn.cs.chain.v2i32(ptr, i32, <2 x i32>, <2 x i32>, i32, ...)
 declare void @llvm.amdgcn.cs.chain.v3i32(ptr, i32, <3 x i32>, <3 x i32>, i32, ...)
 declare void @llvm.amdgcn.cs.chain.v4i32(ptr, i32, <4 x i32>, <4 x i32>, i32, ...)

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
index 87a06cfe75265..236fcc95f223f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s 2>&1 | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 < %s 2>&1 | FileCheck -check-prefix=GFX12 %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx942 < %s 2>&1 | FileCheck -check-prefix=GFX942 %s
 
 ; These situations are "special" in that they either have an alloca that is not
@@ -17,18 +17,20 @@ define amdgpu_cs_chain void @test_alloca() {
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_mov_b32 s0, s32
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_add_co_i32 s32, s0, 0x200
+; GFX12-NEXT:    s_add_co_i32 s32, s0, 0x400
 ; GFX12-NEXT:    scratch_store_b32 off, v0, s0
 ; GFX12-NEXT:    s_endpgm
 ;
 ; GFX942-LABEL: test_alloca:
 ; GFX942:       ; %bb.0: ; %.entry
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX942-NEXT:    s_mov_b32 s32, 16
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_mov_b32 s0, s32
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_add_i32 s32, s0, 0x400
@@ -43,6 +45,65 @@ SW_C:                                             ; preds = %.entry
   ret void
 }
 
+define amdgpu_cs_chain void @test_alloca_chain() {
+; GFX12-LABEL: test_alloca_chain:
+; GFX12:       ; %bb.0: ; %.entry
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX12-NEXT:    s_getpc_b64 s[0:1]
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_sext_i32_i16 s1, s1
+; GFX12-NEXT:    s_add_co_u32 s0, s0, test_fp_all@gotpcrel32@lo+12
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, test_fp_all@gotpcrel32@hi+24
+; GFX12-NEXT:    v_mov_b32_e32 v0, 0
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX12-NEXT:    v_mov_b32_e32 v8, 57
+; GFX12-NEXT:    s_mov_b32 s0, s32
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_add_co_i32 s32, s0, 0x400
+; GFX12-NEXT:    scratch_store_b32 off, v0, s0
+; GFX12-NEXT:    s_mov_b32 s0, 51
+; GFX12-NEXT:    s_mov_b32 s32, s33
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b64 exec, -1
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_setpc_b64 s[2:3]
+;
+; GFX942-LABEL: test_alloca_chain:
+; GFX942:       ; %bb.0: ; %.entry
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
+; GFX942-NEXT:    s_mov_b32 s4, s32
+; GFX942-NEXT:    s_add_i32 s32, s4, 0x400
+; GFX942-NEXT:    s_getpc_b64 s[0:1]
+; GFX942-NEXT:    s_add_u32 s0, s0, test_fp_all@gotpcrel32@lo+4
+; GFX942-NEXT:    s_addc_u32 s1, s1, test_fp_all@gotpcrel32@hi+12
+; GFX942-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942-NEXT:    s_mov_b32 s0, 51
+; GFX942-NEXT:    v_mov_b32_e32 v8, 57
+; GFX942-NEXT:    scratch_store_dword off, v0, s4
+; GFX942-NEXT:    s_mov_b32 s32, s33
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b64 exec, -1
+; GFX942-NEXT:    s_setpc_b64 s[2:3]
+.entry:
+  br label %SW_C
+
+SW_C:                                             ; preds = %.entry
+  %v = alloca i32, i32 1, align 4, addrspace(5)
+  store i32 0, ptr addrspace(5) %v, align 4
+  call void(ptr, i64, i32, i32, i32, ...) @llvm.amdgcn.cs.chain.i32(ptr @test_fp_all, i64 -1, i32 inreg 51, i32 57, i32 0)
+  unreachable
+}
+
 define amdgpu_cs_chain void @test_alloca_var_uniform(i32 inreg %count) {
 ; GFX12-LABEL: test_alloca_var_uniform:
 ; GFX12:       ; %bb.0:
@@ -53,12 +114,13 @@ define amdgpu_cs_chain void @test_alloca_var_uniform(i32 inreg %count) {
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_lshl2_add_u32 s0, s0, 15
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_and_b32 s0, s0, -16
 ; GFX12-NEXT:    s_mov_b32 s1, s32
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_lshl_b32 s0, s0, 5
+; GFX12-NEXT:    s_lshl_b32 s0, s0, 6
 ; GFX12-NEXT:    scratch_store_b32 off, v0, s1
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_add_co_i32 s32, s1, s0
@@ -68,7 +130,8 @@ define amdgpu_cs_chain void @test_alloca_var_uniform(i32 inreg %count) {
 ; GFX942:       ; %bb.0:
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    s_lshl2_add_u32 s0, s0, 15
-; GFX942-NEXT:    s_mov_b32 s32, 16
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_and_b32 s0, s0, -16
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_lshl_b32 s0, s0, 6
@@ -90,27 +153,27 @@ define amdgpu_cs_chain void @test_alloca_var(i32 %count) {
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    v_lshl_add_u32 v0, v8, 2, 15
-; GFX12-NEXT:    s_mov_b32 s1, exec_lo
-; GFX12-NEXT:    s_mov_b32 s0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    s_mov_b64 s[0:1], exec
+; GFX12-NEXT:    s_mov_b32 s2, 0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    v_and_b32_e32 v1, -16, v0
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
-; GFX12-NEXT:  .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT:  .LBB3_1: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_ctz_i32_b32 s2, s1
+; GFX12-NEXT:    s_ctz_i32_b64 s3, s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    v_readlane_b32 s3, v1, s2
-; GFX12-NEXT:    s_bitset0_b32 s1, s2
-; GFX12-NEXT:    s_max_u32 s0, s0, s3
+; GFX12-NEXT:    v_readlane_b32 s4, v1, s3
+; GFX12-NEXT:    s_bitset0_b64 s[0:1], s3
+; GFX12-NEXT:    s_max_u32 s2, s2, s4
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX12-NEXT:    s_cbranch_scc1 .LBB2_1
+; GFX12-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX12-NEXT:    s_cbranch_scc1 .LBB3_1
 ; GFX12-NEXT:  ; %bb.2:
-; GFX12-NEXT:    s_mov_b32 s1, s32
+; GFX12-NEXT:    s_mov_b32 s0, s32
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    v_lshl_add_u32 v1, s0, 5, s1
-; GFX12-NEXT:    scratch_store_b32 off, v0, s1
+; GFX12-NEXT:    v_lshl_add_u32 v1, s2, 6, s0
+; GFX12-NEXT:    scratch_store_b32 off, v0, s0
 ; GFX12-NEXT:    v_readfirstlane_b32 s32, v1
 ; GFX12-NEXT:    s_endpgm
 ;
@@ -122,14 +185,15 @@ define amdgpu_cs_chain void @test_alloca_var(i32 %count) {
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_mov_b64 s[0:1], exec
 ; GFX942-NEXT:    s_mov_b32 s2, 0
-; GFX942-NEXT:    s_mov_b32 s32, 16
-; GFX942-NEXT:  .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
+; GFX942-NEXT:  .LBB3_1: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT:    s_ff1_i32_b64 s3, s[0:1]
 ; GFX942-NEXT:    v_readlane_b32 s4, v1, s3
 ; GFX942-NEXT:    s_bitset0_b64 s[0:1], s3
 ; GFX942-NEXT:    s_max_u32 s2, s2, s4
 ; GFX942-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX942-NEXT:    s_cbranch_scc1 .LBB2_1
+; GFX942-NEXT:    s_cbranch_scc1 .LBB3_1
 ; GFX942-NEXT:  ; %bb.2:
 ; GFX942-NEXT:    s_mov_b32 s0, s32
 ; GFX942-NEXT:    v_mov_b32_e32 v1, s0
@@ -150,6 +214,8 @@ define amdgpu_cs_chain void @test_alloca_and_call() {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_getpc_b64 s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_sext_i32_i16 s1, s1
@@ -158,11 +224,9 @@ define amdgpu_cs_chain void @test_alloca_and_call() {
 ; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
-; GFX12-NEXT:    s_mov_b32 s32, 16
-; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_mov_b32 s2, s32
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_add_co_i32 s32, s2, 0x200
+; GFX12-NEXT:    s_add_co_i32 s32, s2, 0x400
 ; GFX12-NEXT:    scratch_store_b32 off, v0, s2
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
@@ -172,11 +236,12 @@ define amdgpu_cs_chain void @test_alloca_and_call() {
 ; GFX942-LABEL: test_alloca_and_call:
 ; GFX942:       ; %bb.0: ; %.entry
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
 ; GFX942-NEXT:    s_addc_u32 s1, s1, foo@gotpcrel32@hi+12
 ; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
-; GFX942-NEXT:    s_mov_b32 s32, 16
 ; GFX942-NEXT:    s_mov_b32 s2, s32
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_add_i32 s32, s2, 0x400
@@ -202,6 +267,8 @@ define amdgpu_cs_chain void @test_alloca_and_call_var_uniform(i32 inreg %count)
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_getpc_b64 s[2:3]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_sext_i32_i16 s3, s3
@@ -211,14 +278,13 @@ define amdgpu_cs_chain void @test_alloca_and_call_var_uniform(i32 inreg %count)
 ; GFX12-NEXT:    s_lshl2_add_u32 s0, s0, 15
 ; GFX12-NEXT:    s_load_b64 s[2:3], s[2:3], 0x0
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
 ; GFX12-NEXT:    s_and_b32 s0, s0, -16
-; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_mov_b32 s1, s32
-; GFX12-NEXT:    s_lshl_b32 s0, s0, 5
-; GFX12-NEXT:    scratch_store_b32 off, v0, s1
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_lshl_b32 s0, s0, 6
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_add_co_i32 s32, s1, s0
+; GFX12-NEXT:    scratch_store_b32 off, v0, s1
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_swappc_b64 s[30:31], s[2:3]
@@ -229,12 +295,13 @@ define amdgpu_cs_chain void @test_alloca_and_call_var_uniform(i32 inreg %count)
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    s_lshl2_add_u32 s0, s0, 15
 ; GFX942-NEXT:    s_and_b32 s0, s0, -16
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_lshl_b32 s2, s0, 6
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
 ; GFX942-NEXT:    s_addc_u32 s1, s1, foo@gotpcrel32@hi+12
 ; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
-; GFX942-NEXT:    s_mov_b32 s32, 16
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_mov_b32 s3, s32
 ; GFX942-NEXT:    s_add_i32 s32, s3, s2
@@ -257,37 +324,37 @@ define amdgpu_cs_chain void @test_alloca_and_call_var(i32 %count) {
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    v_lshl_add_u32 v0, v8, 2, 15
-; GFX12-NEXT:    s_mov_b32 s1, exec_lo
-; GFX12-NEXT:    s_mov_b32 s0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT:    s_mov_b64 s[0:1], exec
+; GFX12-NEXT:    s_mov_b32 s2, 0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    v_and_b32_e32 v1, -16, v0
 ; GFX12-NEXT:    v_mov_b32_e32 v0, 0
-; GFX12-NEXT:  .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT:  .LBB6_1: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_ctz_i32_b32 s2, s1
+; GFX12-NEXT:    s_ctz_i32_b64 s3, s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    v_readlane_b32 s3, v1, s2
-; GFX12-NEXT:    s_bitset0_b32 s1, s2
-; GFX12-NEXT:    s_max_u32 s0, s0, s3
+; GFX12-NEXT:    v_readlane_b32 s4, v1, s3
+; GFX12-NEXT:    s_bitset0_b64 s[0:1], s3
+; GFX12-NEXT:    s_max_u32 s2, s2, s4
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX12-NEXT:    s_cbranch_scc1 .LBB5_1
+; GFX12-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX12-NEXT:    s_cbranch_scc1 .LBB6_1
 ; GFX12-NEXT:  ; %bb.2:
-; GFX12-NEXT:    s_getpc_b64 s[2:3]
+; GFX12-NEXT:    s_getpc_b64 s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_sext_i32_i16 s3, s3
-; GFX12-NEXT:    s_add_co_u32 s2, s2, foo@gotpcrel32@lo+12
+; GFX12-NEXT:    s_sext_i32_i16 s1, s1
+; GFX12-NEXT:    s_add_co_u32 s0, s0, foo@gotpcrel32@lo+12
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_add_co_ci_u32 s3, s3, foo@gotpcrel32@hi+24
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, foo@gotpcrel32@hi+24
-; GFX12-NEXT:    s_load_b64 s[2:3], s[2:3], 0x0
-; GFX12-NEXT:    v_lshl_add_u32 v1, s0, 5, s1
-; GFX12-NEXT:    scratch_store_b32 off, v0, s1
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
+; GFX12-NEXT:    s_mov_b32 s3, s32
+; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX12-NEXT:    v_lshl_add_u32 v1, s2, 6, s3
+; GFX12-NEXT:    scratch_store_b32 off, v0, s3
 ; GFX12-NEXT:    v_readfirstlane_b32 s32, v1
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_wait_alu depctr_va_sdst(0)
-; GFX12-NEXT:    s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX12-NEXT:    s_endpgm
 ;
 ; GFX942-LABEL: test_alloca_and_call_var:
@@ -298,14 +365,15 @@ define amdgpu_cs_chain void @test_alloca_and_call_var(i32 %count) {
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942-NEXT:    s_mov_b64 s[0:1], exec
 ; GFX942-NEXT:    s_mov_b32 s2, 0
-; GFX942-NEXT:    s_mov_b32 s32, 16
-; GFX942-NEXT:  .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
+; GFX942-NEXT:  .LBB6_1: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT:    s_ff1_i32_b64 s3, s[0:1]
 ; GFX942-NEXT:    v_readlane_b32 s4, v1, s3
 ; GFX942-NEXT:    s_bitset0_b64 s[0:1], s3
 ; GFX942-NEXT:    s_max_u32 s2, s2, s4
 ; GFX942-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX942-NEXT:    s_cbranch_scc1 .LBB5_1
+; GFX942-NEXT:    s_cbranch_scc1 .LBB6_1
 ; GFX942-NEXT:  ; %bb.2:
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
@@ -333,17 +401,17 @@ define amdgpu_cs_chain void @test_call_and_alloca() {
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_getpc_b64 s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_sext_i32_i16 s1, s1
 ; GFX12-NEXT:    s_add_co_u32 s0, s0, foo@gotpcrel32@lo+12
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, foo@gotpcrel32@hi+24
-; GFX12-NEXT:    s_mov_b32 s32, 16
-; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
 ; GFX12-NEXT:    s_mov_b32 s4, s32
-; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_add_co_i32 s32, s4, 0x200
+; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX12-NEXT:    s_add_co_i32 s32, s4, 0x400
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_swappc_b64 s[30:31], s[0:1]
@@ -354,11 +422,12 @@ define amdgpu_cs_chain void @test_call_and_alloca() {
 ; GFX942-LABEL: test_call_and_alloca:
 ; GFX942:       ; %bb.0: ; %.entry
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
 ; GFX942-NEXT:    s_addc_u32 s1, s1, foo@gotpcrel32@hi+12
 ; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
-; GFX942-NEXT:    s_mov_b32 s32, 16
 ; GFX942-NEXT:    s_mov_b32 s4, s32
 ; GFX942-NEXT:    s_add_i32 s32, s4, 0x400
 ; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
@@ -384,6 +453,8 @@ define amdgpu_cs_chain void @test_call_and_alloca_var_uniform(i32 inreg %count)
 ; GFX12-NEXT:    s_wait_samplecnt 0x0
 ; GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
 ; GFX12-NEXT:    s_getpc_b64 s[2:3]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_sext_i32_i16 s3, s3
@@ -392,11 +463,10 @@ define amdgpu_cs_chain void @test_call_and_alloca_var_uniform(i32 inreg %count)
 ; GFX12-NEXT:    s_add_co_ci_u32 s3, s3, foo@gotpcrel32@hi+24
 ; GFX12-NEXT:    s_lshl2_add_u32 s0, s0, 15
 ; GFX12-NEXT:    s_load_b64 s[2:3], s[2:3], 0x0
-; GFX12-NEXT:    s_mov_b32 s32, 16
 ; GFX12-NEXT:    s_and_b32 s0, s0, -16
-; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_mov_b32 s4, s32
-; GFX12-NEXT:    s_lshl_b32 s0, s0, 5
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_lshl_b32 s0, s0, 6
 ; GFX12-NEXT:    v_mov_b32_e32 v40, 0
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
 ; GFX12-NEXT:    s_add_co_i32 s32, s4, s0
@@ -411,12 +481,13 @@ define amdgpu_cs_chain void @test_call_and_alloca_var_uniform(i32 inreg %count)
 ; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX942-NEXT:    s_lshl2_add_u32 s0, s0, 15
 ; GFX942-NEXT:    s_and_b32 s0, s0, -16
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
 ; GFX942-NEXT:    s_lshl_b32 s2, s0, 6
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
 ; GFX942-NEXT:    s_addc_u32 s1, s1, foo@gotpcrel32@hi+12
 ; GFX942-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
-; GFX942-NEXT:    s_mov_b32 s32, 16
 ; GFX942-NEXT:    s_mov_b32 s4, s32
 ; GFX942-NEXT:    v_mov_b32_e32 v40, 0
 ; GFX942-NEXT:    s_add_i32 s32, s4, s2
@@ -440,36 +511,36 @@ define amdgpu_cs_chain void @test_call_and_alloca_var(i32 %count) {
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    v_lshl_add_u32 v0, v8, 2, 15
 ; GFX12-NEXT:    v_mov_b32_e32 v40, 0
-; GFX12-NEXT:    s_mov_b32 s1, exec_lo
-; GFX12-NEXT:    s_mov_b32 s0, 0
-; GFX12-NEXT:    s_mov_b32 s32, 16
+; GFX12-NEXT:    s_mov_b64 s[0:1], exec
+; GFX12-NEXT:    s_mov_b32 s2, 0
+; GFX12-NEXT:    s_mov_b32 s33, s32
 ; GFX12-NEXT:    v_and_b32_e32 v0, -16, v0
-; GFX12-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT:    s_add_co_i32 s32, s32, 16
+; GFX12-NEXT:  .LBB9_1: ; =>This Inner Loop Header: Depth=1
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_ctz_i32_b32 s2, s1
+; GFX12-NEXT:    s_ctz_i32_b64 s3, s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT:    v_readlane_b32 s3, v0, s2
-; GFX12-NEXT:    s_bitset0_b32 s1, s2
-; GFX12-NEXT:    s_max_u32 s0, s0, s3
+; GFX12-NEXT:    v_readlane_b32 s4, v0, s3
+; GFX12-NEXT:    s_bitset0_b64 s[0:1], s3
+; GFX12-NEXT:    s_max_u32 s2, s2, s4
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX12-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX12-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX12-NEXT:    s_cbranch_scc1 .LBB9_1
 ; GFX12-NEXT:  ; %bb.2:
-; GFX12-NEXT:    s_getpc_b64 s[2:3]
+; GFX12-NEXT:    s_getpc_b64 s[0:1]
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_sext_i32_i16 s3, s3
-; GFX12-NEXT:    s_add_co_u32 s2, s2, foo@gotpcrel32@lo+12
+; GFX12-NEXT:    s_sext_i32_i16 s1, s1
+; GFX12-NEXT:    s_add_co_u32 s0, s0, foo@gotpcrel32@lo+12
 ; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
-; GFX12-NEXT:    s_add_co_ci_u32 s3, s3, foo@gotpcrel32@hi+24
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, foo@gotpcrel32@hi+24
 ; GFX12-NEXT:    s_mov_b32 s4, s32
-; GFX12-NEXT:    s_load_b64 s[2:3], s[2:3], 0x0
-; GFX12-NEXT:    v_lshl_add_u32 v0, s0, 5, s4
+; GFX12-NEXT:    s_load_b64 s[0:1], s[0:1], 0x0
+; GFX12-NEXT:    v_lshl_add_u32 v0, s2, 6, s4
 ; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX12-NEXT:    v_readfirstlane_b32 s32, v0
 ; GFX12-NEXT:    s_wait_kmcnt 0x0
 ; GFX12-NEXT:    s_wait_alu depctr_va_sdst(0)
-; GFX12-NEXT:    s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT:    s_swappc_b64 s[30:31], s[0:1]
 ; GFX12-NEXT:    scratch_store_b32 off, v40, s4
 ; GFX12-NEXT:    s_endpgm
 ;
@@ -481,14 +552,15 @@ define amdgpu_cs_chain void @test_call_and_alloca_var(i32 %count) {
 ; GFX942-NEXT:    v_mov_b32_e32 v40, 0
 ; GFX942-NEXT:    s_mov_b64 s[0:1], exec
 ; GFX942-NEXT:    s_mov_b32 s2, 0
-; GFX942-NEXT:    s_mov_b32 s32, 16
-; GFX942-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_add_i32 s32, s32, 16
+; GFX942-NEXT:  .LBB9_1: ; =>This Inner Loop Header: Depth=1
 ; GFX942-NEXT:    s_ff1_i32_b64 s3, s[0:1]
 ; GFX942-NEXT:    v_readlane_b32 s4, v0, s3
 ; GFX942-NEXT:    s_bitset0_b64 s[0:1], s3
 ; GFX942-NEXT:    s_max_u32 s2, s2, s4
 ; GFX942-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX942-NEXT:    s_cbranch_scc1 .LBB8_1
+; GFX942-NEXT:    s_cbranch_scc1 .LBB9_1
 ; GFX942-NEXT:  ; %bb.2:
 ; GFX942-NEXT:    s_getpc_b64 s[0:1]
 ; GFX942-NEXT:    s_add_u32 s0, s0, foo@gotpcrel32@lo+4
@@ -508,3 +580,66 @@ define amdgpu_cs_chain void @test_call_and_alloca_var(i32 %count) {
   store i32 0, ptr addrspace(5) %v, align 4
   ret void
 }
+
+define amdgpu_cs_chain void @test_fp_all() #0 {
+; GFX12-LABEL: test_fp_all:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_endpgm
+;
+; GFX942-LABEL: test_fp_all:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_endpgm
+  ret void
+}
+
+define amdgpu_cs_chain void @test_fp_all_chain() #0 {
+; GFX12-LABEL: test_fp_all_chain:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT:    s_wait_expcnt 0x0
+; GFX12-NEXT:    s_wait_samplecnt 0x0
+; GFX12-NEXT:    s_wait_bvhcnt 0x0
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_getpc_b64 s[0:1]
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_sext_i32_i16 s1, s1
+; GFX12-NEXT:    s_add_co_u32 s0, s0, test_fp_all@gotpcrel32@lo+12
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_add_co_ci_u32 s1, s1, test_fp_all@gotpcrel32@hi+24
+; GFX12-NEXT:    v_mov_b32_e32 v8, 57
+; GFX12-NEXT:    s_load_b64 s[2:3], s[0:1], 0x0
+; GFX12-NEXT:    s_mov_b32 s0, 51
+; GFX12-NEXT:    s_mov_b32 s33, s32
+; GFX12-NEXT:    s_wait_kmcnt 0x0
+; GFX12-NEXT:    s_mov_b64 exec, -1
+; GFX12-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GFX12-NEXT:    s_setpc_b64 s[2:3]
+;
+; GFX942-LABEL: test_fp_all_chain:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    s_getpc_b64 s[0:1]
+; GFX942-NEXT:    s_add_u32 s0, s0, test_fp_all@gotpcrel32@lo+4
+; GFX942-NEXT:    s_addc_u32 s1, s1, test_fp_all@gotpcrel32@hi+12
+; GFX942-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942-NEXT:    s_mov_b32 s0, 51
+; GFX942-NEXT:    v_mov_b32_e32 v8, 57
+; GFX942-NEXT:    s_mov_b32 s33, s32
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_mov_b64 exec, -1
+; GFX942-NEXT:    s_setpc_b64 s[2:3]
+  call void(ptr, i64, i32, i32, i32, ...) @llvm.amdgcn.cs.chain.i32(ptr @test_fp_all, i64 -1, i32 inreg 51, i32 57, i32 0)
+  unreachable
+}
+
+declare void @llvm.amdgcn.cs.chain.i32(ptr, i64, i32, i32, i32, ...)
+
+attributes #0 = { "frame-pointer"="all" }

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
index 36e2db0c4879d..cdd569fa17236 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-preserve-cc.ll
@@ -40,6 +40,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %a, <3 x i32> %b) {
 ; GISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v1
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; GISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; GISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -53,6 +54,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %a, <3 x i32> %b) {
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v2
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
+; GISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; GISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; GISEL-GFX10-NEXT:    s_add_u32 s100, s100, s3
@@ -80,6 +82,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %a, <3 x i32> %b) {
 ; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v3 :: v_dual_mov_b32 v9, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
@@ -93,6 +96,7 @@ define amdgpu_cs void @cs_to_chain_preserve(<3 x i32> inreg %a, <3 x i32> %b) {
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v2
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL-GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; DAGISEL-GFX10-NEXT:    s_bitset0_b32 s103, 21
 ; DAGISEL-GFX10-NEXT:    s_add_u32 s100, s100, s3
@@ -181,13 +185,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve(<3 x i32>
 ; GISEL-GFX11-LABEL: chain_preserve_to_chain_preserve:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; GISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX11-NEXT:    ;;#ASMSTART
 ; GISEL-GFX11-NEXT:    s_nop
 ; GISEL-GFX11-NEXT:    ;;#ASMEND
-; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
@@ -198,13 +202,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve(<3 x i32>
 ; GISEL-GFX10-LABEL: chain_preserve_to_chain_preserve:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; GISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX10-NEXT:    ;;#ASMSTART
 ; GISEL-GFX10-NEXT:    s_nop
 ; GISEL-GFX10-NEXT:    ;;#ASMEND
-; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
@@ -215,13 +219,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve(<3 x i32>
 ; DAGISEL-GFX11-LABEL: chain_preserve_to_chain_preserve:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX11-NEXT:    s_nop
 ; DAGISEL-GFX11-NEXT:    ;;#ASMEND
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
@@ -232,13 +236,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve(<3 x i32>
 ; DAGISEL-GFX10-LABEL: chain_preserve_to_chain_preserve:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX10-NEXT:    s_nop
 ; DAGISEL-GFX10-NEXT:    ;;#ASMEND
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
@@ -254,13 +258,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain(<3 x i32> inreg %a
 ; GISEL-GFX11-LABEL: chain_preserve_to_chain:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; GISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX11-NEXT:    ;;#ASMSTART
 ; GISEL-GFX11-NEXT:    s_nop
 ; GISEL-GFX11-NEXT:    ;;#ASMEND
-; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
@@ -271,13 +275,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain(<3 x i32> inreg %a
 ; GISEL-GFX10-LABEL: chain_preserve_to_chain:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; GISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX10-NEXT:    ;;#ASMSTART
 ; GISEL-GFX10-NEXT:    s_nop
 ; GISEL-GFX10-NEXT:    ;;#ASMEND
-; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
@@ -288,13 +292,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain(<3 x i32> inreg %a
 ; DAGISEL-GFX11-LABEL: chain_preserve_to_chain:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX11-NEXT:    s_nop
 ; DAGISEL-GFX11-NEXT:    ;;#ASMEND
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
@@ -305,13 +309,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain(<3 x i32> inreg %a
 ; DAGISEL-GFX10-LABEL: chain_preserve_to_chain:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX10-NEXT:    s_nop
 ; DAGISEL-GFX10-NEXT:    ;;#ASMEND
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
@@ -327,7 +331,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; GISEL-GFX11-LABEL: chain_preserve_to_chain_wwm:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; GISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX11-NEXT:    s_or_saveexec_b32 s0, -1
 ; GISEL-GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
@@ -336,7 +340,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; GISEL-GFX11-NEXT:    ;;#ASMSTART
 ; GISEL-GFX11-NEXT:    s_nop
 ; GISEL-GFX11-NEXT:    ;;#ASMEND
-; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v2, v1
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
@@ -349,7 +353,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; GISEL-GFX10-LABEL: chain_preserve_to_chain_wwm:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; GISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX10-NEXT:    s_or_saveexec_b32 s0, -1
 ; GISEL-GFX10-NEXT:    v_cndmask_b32_e64 v1, 4, 3, s0
@@ -357,7 +361,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; GISEL-GFX10-NEXT:    ;;#ASMSTART
 ; GISEL-GFX10-NEXT:    s_nop
 ; GISEL-GFX10-NEXT:    ;;#ASMEND
-; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v2, v1
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
@@ -369,7 +373,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; DAGISEL-GFX11-LABEL: chain_preserve_to_chain_wwm:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX11-NEXT:    s_or_saveexec_b32 s4, -1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX11-NEXT:    v_cndmask_b32_e64 v1, 4, 3, s4
@@ -377,7 +381,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX11-NEXT:    s_nop
 ; DAGISEL-GFX11-NEXT:    ;;#ASMEND
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v2, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
@@ -390,7 +394,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; DAGISEL-GFX10-LABEL: chain_preserve_to_chain_wwm:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX10-NEXT:    s_or_saveexec_b32 s4, -1
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX10-NEXT:    v_cndmask_b32_e64 v1, 4, 3, s4
@@ -398,7 +402,7 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_wwm(<3 x i32> inre
 ; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX10-NEXT:    s_nop
 ; DAGISEL-GFX10-NEXT:    ;;#ASMEND
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, v1
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
@@ -418,9 +422,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; GISEL-GFX11-LABEL: chain_preserve_to_chain_use_all_v0_v7:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    s_clause 0x1
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v11, off offset:4
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off
+; GISEL-GFX11-NEXT:    s_clause 0x1 ; 8-byte Folded Spill
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v11, s32 offset:4
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v11, v8
 ; GISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX11-NEXT:    ;;#ASMSTART
@@ -429,9 +433,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v11
-; GISEL-GFX11-NEXT:    s_clause 0x1
-; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off
-; GISEL-GFX11-NEXT:    scratch_load_b32 v11, off, off offset:4
+; GISEL-GFX11-NEXT:    s_clause 0x1 ; 8-byte Folded Reload
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32
+; GISEL-GFX11-NEXT:    scratch_load_b32 v11, off, s32 offset:4
 ; GISEL-GFX11-NEXT:    s_mov_b32 s0, s3
 ; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
@@ -439,8 +443,8 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; GISEL-GFX10-LABEL: chain_preserve_to_chain_use_all_v0_v7:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX10-NEXT:    buffer_store_dword v11, off, s[48:51], 0 offset:4 ; 4-byte Folded Spill
-; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v11, off, s[48:51], s32 offset:4 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v11, v8
 ; GISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; GISEL-GFX10-NEXT:    ;;#ASMSTART
@@ -449,9 +453,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v11
-; GISEL-GFX10-NEXT:    s_clause 0x1
-; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0
-; GISEL-GFX10-NEXT:    buffer_load_dword v11, off, s[48:51], 0 offset:4
+; GISEL-GFX10-NEXT:    s_clause 0x1 ; 8-byte Folded Reload
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32
+; GISEL-GFX10-NEXT:    buffer_load_dword v11, off, s[48:51], s32 offset:4
 ; GISEL-GFX10-NEXT:    s_mov_b32 s0, s3
 ; GISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
 ; GISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
@@ -459,9 +463,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; DAGISEL-GFX11-LABEL: chain_preserve_to_chain_use_all_v0_v7:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    s_clause 0x1
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v11, off offset:4
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off
+; DAGISEL-GFX11-NEXT:    s_clause 0x1 ; 8-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v11, s32 offset:4
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v11, v8
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
@@ -470,9 +474,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v11
-; DAGISEL-GFX11-NEXT:    s_clause 0x1
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v11, off, off offset:4
+; DAGISEL-GFX11-NEXT:    s_clause 0x1 ; 8-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v11, off, s32 offset:4
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s0, s3
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
 ; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
@@ -480,8 +484,8 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; DAGISEL-GFX10-LABEL: chain_preserve_to_chain_use_all_v0_v7:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v11, off, s[48:51], 0 offset:4 ; 4-byte Folded Spill
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v11, off, s[48:51], s32 offset:4 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v11, v8
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s3, s0
 ; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
@@ -490,9 +494,9 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_use_all_v0_v7(<3 x
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_callee@abs32@hi
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_callee@abs32@lo
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v11
-; DAGISEL-GFX10-NEXT:    s_clause 0x1
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v11, off, s[48:51], 0 offset:4
+; DAGISEL-GFX10-NEXT:    s_clause 0x1 ; 8-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v11, off, s[48:51], s32 offset:4
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s0, s3
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
 ; DAGISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
@@ -505,13 +509,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve_fewer_arg
 ; GISEL-GFX11-LABEL: chain_preserve_to_chain_preserve_fewer_args:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; GISEL-GFX11-NEXT:    s_mov_b32 s2, s0
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX11-NEXT:    ;;#ASMSTART
 ; GISEL-GFX11-NEXT:    s_nop
 ; GISEL-GFX11-NEXT:    ;;#ASMEND
-; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee_2@abs32@lo
 ; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee_2@abs32@hi
 ; GISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
@@ -522,13 +526,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve_fewer_arg
 ; GISEL-GFX10-LABEL: chain_preserve_to_chain_preserve_fewer_args:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; GISEL-GFX10-NEXT:    s_mov_b32 s2, s0
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; GISEL-GFX10-NEXT:    ;;#ASMSTART
 ; GISEL-GFX10-NEXT:    s_nop
 ; GISEL-GFX10-NEXT:    ;;#ASMEND
-; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee_2@abs32@lo
 ; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee_2@abs32@hi
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
@@ -539,13 +543,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve_fewer_arg
 ; DAGISEL-GFX11-LABEL: chain_preserve_to_chain_preserve_fewer_args:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, off ; 4-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s2, s0
 ; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX11-NEXT:    s_nop
 ; DAGISEL-GFX11-NEXT:    ;;#ASMEND
-; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, off ; 4-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee_2@abs32@hi
 ; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee_2@abs32@lo
@@ -556,13 +560,13 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve_fewer_arg
 ; DAGISEL-GFX10-LABEL: chain_preserve_to_chain_preserve_fewer_args:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], 0 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s32 ; 4-byte Folded Spill
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v1, v8
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s2, s0
 ; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
 ; DAGISEL-GFX10-NEXT:    s_nop
 ; DAGISEL-GFX10-NEXT:    ;;#ASMEND
-; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], 0 ; 4-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s32 ; 4-byte Folded Reload
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee_2@abs32@hi
 ; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v1
 ; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee_2@abs32@lo
@@ -579,72 +583,150 @@ define amdgpu_cs_chain_preserve void @chain_preserve_to_chain_preserve_fewer_arg
 ; Note that amdgpu_cs_chain_preserve functions are not allowed to call
 ; llvm.amdgcn.cs.chain with more vgpr args than they received as parameters.
 
-define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_preserve_dont_realign_stack(i32 %idx) {
-; GISEL-GFX11-LABEL: amdgpu_cs_chain_preserve_dont_realign_stack:
+define amdgpu_cs_chain_preserve void @amdgpu_cs_chain_preserve_realign_stack(i32 %idx, <3 x i32> inreg %a, <3 x i32> %b) {
+; GISEL-GFX11-LABEL: amdgpu_cs_chain_preserve_realign_stack:
 ; GISEL-GFX11:       ; %bb.0:
 ; GISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-GFX11-NEXT:    s_mov_b32 s3, 4
-; GISEL-GFX11-NEXT:    s_mov_b32 s2, 3
-; GISEL-GFX11-NEXT:    s_mov_b32 s1, 2
-; GISEL-GFX11-NEXT:    s_mov_b32 s0, 1
-; GISEL-GFX11-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
-; GISEL-GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GISEL-GFX11-NEXT:    v_mov_b32_e32 v4, v0
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
+; GISEL-GFX11-NEXT:    s_mov_b32 s7, 4
+; GISEL-GFX11-NEXT:    s_mov_b32 s6, 3
+; GISEL-GFX11-NEXT:    s_mov_b32 s5, 2
+; GISEL-GFX11-NEXT:    s_mov_b32 s4, 1
+; GISEL-GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_lshlrev_b32 v0, 4, v8
+; GISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v3, s6
+; GISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
+; GISEL-GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v4, s7 :: v_dual_add_nc_u32 v5, s33, v0
+; GISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s33 ; 4-byte Folded Spill
+; GISEL-GFX11-NEXT:    s_mov_b32 s3, s0
+; GISEL-GFX11-NEXT:    ;;#ASMSTART
+; GISEL-GFX11-NEXT:    s_nop
+; GISEL-GFX11-NEXT:    ;;#ASMEND
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v9 :: v_dual_add_nc_u32 v5, 32, v5
+; GISEL-GFX11-NEXT:    v_dual_mov_b32 v9, v10 :: v_dual_mov_b32 v10, v11
+; GISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
+; GISEL-GFX11-NEXT:    scratch_store_b128 v5, v[1:4], off dlc
 ; GISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; GISEL-GFX11-NEXT:    s_endpgm
+; GISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s33 ; 4-byte Folded Reload
+; GISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
+; GISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; GISEL-GFX11-NEXT:    s_addk_i32 s32, 0xe0
+; GISEL-GFX11-NEXT:    s_mov_b32 s32, s34
+; GISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
-; GISEL-GFX10-LABEL: amdgpu_cs_chain_preserve_dont_realign_stack:
+; GISEL-GFX10-LABEL: amdgpu_cs_chain_preserve_realign_stack:
 ; GISEL-GFX10:       ; %bb.0:
 ; GISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
 ; GISEL-GFX10-NEXT:    v_lshlrev_b32_e32 v0, 4, v8
+; GISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
 ; GISEL-GFX10-NEXT:    v_mov_b32_e32 v1, 1
-; GISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 2
-; GISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 3
-; GISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 4
-; GISEL-GFX10-NEXT:    buffer_store_dword v1, v0, s[48:51], 0 offen
+; GISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v3, 5, s33
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 3
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v5, 4
+; GISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s33 ; 4-byte Folded Spill
+; GISEL-GFX10-NEXT:    s_mov_b32 s3, s0
+; GISEL-GFX10-NEXT:    v_add_nc_u32_e32 v2, v0, v3
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 2
+; GISEL-GFX10-NEXT:    ;;#ASMSTART
+; GISEL-GFX10-NEXT:    s_nop
+; GISEL-GFX10-NEXT:    ;;#ASMEND
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v9
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v9, v10
+; GISEL-GFX10-NEXT:    v_add_nc_u32_e32 v2, 32, v2
+; GISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v11
+; GISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; GISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
+; GISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
+; GISEL-GFX10-NEXT:    buffer_store_dword v1, v2, s[48:51], 0 offen
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GISEL-GFX10-NEXT:    buffer_store_dword v2, v0, s[48:51], 0 offen offset:4
+; GISEL-GFX10-NEXT:    buffer_store_dword v3, v2, s[48:51], 0 offen offset:4
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GISEL-GFX10-NEXT:    buffer_store_dword v3, v0, s[48:51], 0 offen offset:8
+; GISEL-GFX10-NEXT:    buffer_store_dword v4, v2, s[48:51], 0 offen offset:8
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GISEL-GFX10-NEXT:    buffer_store_dword v4, v0, s[48:51], 0 offen offset:12
+; GISEL-GFX10-NEXT:    buffer_store_dword v5, v2, s[48:51], 0 offen offset:12
 ; GISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GISEL-GFX10-NEXT:    s_endpgm
+; GISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s33 ; 4-byte Folded Reload
+; GISEL-GFX10-NEXT:    s_mov_b32 s0, s3
+; GISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1c00
+; GISEL-GFX10-NEXT:    s_mov_b32 s32, s34
+; GISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; GISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
 ;
-; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_preserve_dont_realign_stack:
+; DAGISEL-GFX11-LABEL: amdgpu_cs_chain_preserve_realign_stack:
 ; DAGISEL-GFX11:       ; %bb.0:
 ; DAGISEL-GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v0, 1 :: v_dual_mov_b32 v1, 2
-; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v2, 3 :: v_dual_mov_b32 v3, 4
-; DAGISEL-GFX11-NEXT:    v_lshl_add_u32 v4, v8, 4, 0
-; DAGISEL-GFX11-NEXT:    scratch_store_b128 v4, v[0:3], off dlc
+; DAGISEL-GFX11-NEXT:    s_add_i32 s33, s32, 31
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX11-NEXT:    s_and_not1_b32 s33, s33, 31
+; DAGISEL-GFX11-NEXT:    s_addk_i32 s32, 0xe0
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s3, s0
+; DAGISEL-GFX11-NEXT:    s_add_i32 s0, s33, 32
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v1, 1 :: v_dual_mov_b32 v2, 2
+; DAGISEL-GFX11-NEXT:    v_lshl_add_u32 v5, v8, 4, s0
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v3, 3 :: v_dual_mov_b32 v4, 4
+; DAGISEL-GFX11-NEXT:    scratch_store_b32 off, v16, s33 ; 4-byte Folded Spill
+; DAGISEL-GFX11-NEXT:    ;;#ASMSTART
+; DAGISEL-GFX11-NEXT:    s_nop
+; DAGISEL-GFX11-NEXT:    ;;#ASMEND
+; DAGISEL-GFX11-NEXT:    v_dual_mov_b32 v8, v9 :: v_dual_mov_b32 v9, v10
+; DAGISEL-GFX11-NEXT:    scratch_store_b128 v5, v[1:4], off dlc
 ; DAGISEL-GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
-; DAGISEL-GFX11-NEXT:    s_endpgm
+; DAGISEL-GFX11-NEXT:    scratch_load_b32 v16, off, s33 ; 4-byte Folded Reload
+; DAGISEL-GFX11-NEXT:    v_mov_b32_e32 v10, v11
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s0, s3
+; DAGISEL-GFX11-NEXT:    s_mov_b32 s32, s34
+; DAGISEL-GFX11-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX11-NEXT:    s_setpc_b64 s[4:5]
 ;
-; DAGISEL-GFX10-LABEL: amdgpu_cs_chain_preserve_dont_realign_stack:
+; DAGISEL-GFX10-LABEL: amdgpu_cs_chain_preserve_realign_stack:
 ; DAGISEL-GFX10:       ; %bb.0:
 ; DAGISEL-GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v0, 4
-; DAGISEL-GFX10-NEXT:    v_lshl_add_u32 v1, v8, 4, 0
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 3
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 2
-; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 1
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v0, v1, s[48:51], 0 offen offset:12
+; DAGISEL-GFX10-NEXT:    s_add_i32 s33, s32, 0x3e0
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v2, 4
+; DAGISEL-GFX10-NEXT:    s_and_b32 s33, s33, 0xfffffc00
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v3, 3
+; DAGISEL-GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v4, 2
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v5, 1
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v16, off, s[48:51], s33 ; 4-byte Folded Spill
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s3, s0
+; DAGISEL-GFX10-NEXT:    v_add_nc_u32_e32 v0, 32, v0
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s34, s32
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s5, chain_preserve_callee@abs32@hi
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s4, chain_preserve_callee@abs32@lo
+; DAGISEL-GFX10-NEXT:    s_addk_i32 s32, 0x1c00
+; DAGISEL-GFX10-NEXT:    v_lshl_add_u32 v1, v8, 4, v0
+; DAGISEL-GFX10-NEXT:    ;;#ASMSTART
+; DAGISEL-GFX10-NEXT:    s_nop
+; DAGISEL-GFX10-NEXT:    ;;#ASMEND
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v8, v9
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v9, v10
+; DAGISEL-GFX10-NEXT:    v_mov_b32_e32 v10, v11
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v2, v1, s[48:51], 0 offen offset:12
 ; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v2, v1, s[48:51], 0 offen offset:8
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v3, v1, s[48:51], 0 offen offset:8
 ; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v3, v1, s[48:51], 0 offen offset:4
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v4, v1, s[48:51], 0 offen offset:4
 ; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; DAGISEL-GFX10-NEXT:    buffer_store_dword v4, v1, s[48:51], 0 offen
+; DAGISEL-GFX10-NEXT:    buffer_store_dword v5, v1, s[48:51], 0 offen
 ; DAGISEL-GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; DAGISEL-GFX10-NEXT:    s_endpgm
+; DAGISEL-GFX10-NEXT:    buffer_load_dword v16, off, s[48:51], s33 ; 4-byte Folded Reload
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s0, s3
+; DAGISEL-GFX10-NEXT:    s_mov_b32 s32, s34
+; DAGISEL-GFX10-NEXT:    s_mov_b32 exec_lo, -1
+; DAGISEL-GFX10-NEXT:    s_setpc_b64 s[4:5]
+  call void asm "s_nop", "~{v0},~{v8},~{v16},~{s0}"()
   %alloca.align32 = alloca [8 x <4 x i32>], align 32, addrspace(5)
   %gep0 = getelementptr inbounds [8 x <4 x i32>], ptr addrspace(5) %alloca.align32, i32 0, i32 %idx
   store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, ptr addrspace(5) %gep0, align 32
-  ret void
+  call void(ptr, i32, <3 x i32>, <3 x i32>, i32, ...) @llvm.amdgcn.cs.chain.v3i32(ptr @chain_preserve_callee, i32 -1, <3 x i32> inreg %a, <3 x i32> %b, i32 0)
+  unreachable
 }
 
 declare void @llvm.amdgcn.cs.chain.v3i32(ptr, i32, <3 x i32>, <3 x i32>, i32, ...)

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index f5b534c179019..a6659b2b1dd0d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -742,7 +742,6 @@ define amdgpu_cs_chain void @wwm_write_to_arg_reg(<3 x i32> inreg %sgpr, ptr inr
 ; GISEL12-NEXT:    s_wait_samplecnt 0x0
 ; GISEL12-NEXT:    s_wait_bvhcnt 0x0
 ; GISEL12-NEXT:    s_wait_kmcnt 0x0
-; GISEL12-NEXT:    s_mov_b32 s32, 0
 ; GISEL12-NEXT:    s_or_saveexec_b32 s9, -1
 ; GISEL12-NEXT:    s_or_saveexec_b32 s12, -1
 ; GISEL12-NEXT:    s_mov_b32 s6, s0
@@ -823,7 +822,6 @@ define amdgpu_cs_chain void @wwm_write_to_arg_reg(<3 x i32> inreg %sgpr, ptr inr
 ; DAGISEL12-NEXT:    s_wait_samplecnt 0x0
 ; DAGISEL12-NEXT:    s_wait_bvhcnt 0x0
 ; DAGISEL12-NEXT:    s_wait_kmcnt 0x0
-; DAGISEL12-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL12-NEXT:    s_or_saveexec_b32 s11, -1
 ; DAGISEL12-NEXT:    s_or_saveexec_b32 s6, -1
 ; DAGISEL12-NEXT:    v_dual_mov_b32 v39, v23 :: v_dual_mov_b32 v38, v22
@@ -900,7 +898,6 @@ define amdgpu_cs_chain void @wwm_write_to_arg_reg(<3 x i32> inreg %sgpr, ptr inr
 ; GISEL10-LABEL: wwm_write_to_arg_reg:
 ; GISEL10:       ; %bb.0: ; %entry
 ; GISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL10-NEXT:    s_mov_b32 s32, 0
 ; GISEL10-NEXT:    s_or_saveexec_b32 s9, -1
 ; GISEL10-NEXT:    s_or_saveexec_b32 s12, -1
 ; GISEL10-NEXT:    s_mov_b32 s6, s0
@@ -1013,7 +1010,6 @@ define amdgpu_cs_chain void @wwm_write_to_arg_reg(<3 x i32> inreg %sgpr, ptr inr
 ; DAGISEL10-LABEL: wwm_write_to_arg_reg:
 ; DAGISEL10:       ; %bb.0: ; %entry
 ; DAGISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL10-NEXT:    s_or_saveexec_b32 s11, -1
 ; DAGISEL10-NEXT:    s_or_saveexec_b32 s6, -1
 ; DAGISEL10-NEXT:    v_mov_b32_e32 v39, v23

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.set.inactive.chain.arg.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.set.inactive.chain.arg.ll
index 18098d0dd9c3d..5c97fd5875ad8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.set.inactive.chain.arg.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.set.inactive.chain.arg.ll
@@ -291,7 +291,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; GISEL11-LABEL: set_inactive_chain_arg_call:
 ; GISEL11:       ; %bb.0:
 ; GISEL11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL11-NEXT:    s_mov_b32 s32, 0
 ; GISEL11-NEXT:    v_dual_mov_b32 v41, v8 :: v_dual_mov_b32 v42, v9
 ; GISEL11-NEXT:    s_or_saveexec_b32 s0, -1
 ; GISEL11-NEXT:    v_mov_b32_e32 v40, v10
@@ -320,7 +319,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; DAGISEL11-LABEL: set_inactive_chain_arg_call:
 ; DAGISEL11:       ; %bb.0:
 ; DAGISEL11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL11-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL11-NEXT:    v_mov_b32_e32 v43, v11
 ; DAGISEL11-NEXT:    s_or_saveexec_b32 s0, -1
 ; DAGISEL11-NEXT:    v_mov_b32_e32 v40, v10
@@ -349,7 +347,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; GISEL10-LABEL: set_inactive_chain_arg_call:
 ; GISEL10:       ; %bb.0:
 ; GISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL10-NEXT:    s_mov_b32 s32, 0
 ; GISEL10-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL10-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL10-NEXT:    s_or_saveexec_b32 s0, -1
@@ -386,7 +383,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; DAGISEL10-LABEL: set_inactive_chain_arg_call:
 ; DAGISEL10:       ; %bb.0:
 ; DAGISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL10-NEXT:    v_mov_b32_e32 v43, v11
 ; DAGISEL10-NEXT:    s_or_saveexec_b32 s0, -1
 ; DAGISEL10-NEXT:    v_mov_b32_e32 v40, v10
@@ -423,7 +419,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; GISEL11_W64-LABEL: set_inactive_chain_arg_call:
 ; GISEL11_W64:       ; %bb.0:
 ; GISEL11_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL11_W64-NEXT:    s_mov_b32 s32, 0
 ; GISEL11_W64-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL11_W64-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL11_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
@@ -459,7 +454,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; DAGISEL11_W64-LABEL: set_inactive_chain_arg_call:
 ; DAGISEL11_W64:       ; %bb.0:
 ; DAGISEL11_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL11_W64-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL11_W64-NEXT:    v_mov_b32_e32 v43, v11
 ; DAGISEL11_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; DAGISEL11_W64-NEXT:    v_mov_b32_e32 v40, v10
@@ -495,7 +489,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; GISEL10_W64-LABEL: set_inactive_chain_arg_call:
 ; GISEL10_W64:       ; %bb.0:
 ; GISEL10_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL10_W64-NEXT:    s_mov_b32 s32, 0
 ; GISEL10_W64-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL10_W64-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL10_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
@@ -532,7 +525,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_call(ptr addrspace(1) %out,
 ; DAGISEL10_W64-LABEL: set_inactive_chain_arg_call:
 ; DAGISEL10_W64:       ; %bb.0:
 ; DAGISEL10_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL10_W64-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL10_W64-NEXT:    v_mov_b32_e32 v43, v11
 ; DAGISEL10_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; DAGISEL10_W64-NEXT:    v_mov_b32_e32 v40, v10
@@ -580,7 +572,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; GISEL11-LABEL: set_inactive_chain_arg_last_vgpr:
 ; GISEL11:       ; %bb.0:
 ; GISEL11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL11-NEXT:    s_mov_b32 s32, 0
 ; GISEL11-NEXT:    v_dual_mov_b32 v41, v8 :: v_dual_mov_b32 v42, v9
 ; GISEL11-NEXT:    v_mov_b32_e32 v43, v10
 ; GISEL11-NEXT:    s_or_saveexec_b32 s0, -1
@@ -609,7 +600,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; DAGISEL11-LABEL: set_inactive_chain_arg_last_vgpr:
 ; DAGISEL11:       ; %bb.0:
 ; DAGISEL11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL11-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL11-NEXT:    s_or_saveexec_b32 s0, -1
 ; DAGISEL11-NEXT:    v_mov_b32_e32 v40, v11
 ; DAGISEL11-NEXT:    s_mov_b32 exec_lo, s0
@@ -638,7 +628,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; GISEL10-LABEL: set_inactive_chain_arg_last_vgpr:
 ; GISEL10:       ; %bb.0:
 ; GISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL10-NEXT:    s_mov_b32 s32, 0
 ; GISEL10-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL10-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL10-NEXT:    v_mov_b32_e32 v43, v10
@@ -675,7 +664,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; DAGISEL10-LABEL: set_inactive_chain_arg_last_vgpr:
 ; DAGISEL10:       ; %bb.0:
 ; DAGISEL10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL10-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL10-NEXT:    s_or_saveexec_b32 s0, -1
 ; DAGISEL10-NEXT:    v_mov_b32_e32 v40, v11
 ; DAGISEL10-NEXT:    s_mov_b32 exec_lo, s0
@@ -712,7 +700,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; GISEL11_W64-LABEL: set_inactive_chain_arg_last_vgpr:
 ; GISEL11_W64:       ; %bb.0:
 ; GISEL11_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL11_W64-NEXT:    s_mov_b32 s32, 0
 ; GISEL11_W64-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL11_W64-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL11_W64-NEXT:    v_mov_b32_e32 v43, v10
@@ -748,7 +735,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; DAGISEL11_W64-LABEL: set_inactive_chain_arg_last_vgpr:
 ; DAGISEL11_W64:       ; %bb.0:
 ; DAGISEL11_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL11_W64-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL11_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; DAGISEL11_W64-NEXT:    v_mov_b32_e32 v40, v11
 ; DAGISEL11_W64-NEXT:    s_mov_b64 exec, s[0:1]
@@ -784,7 +770,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; GISEL10_W64-LABEL: set_inactive_chain_arg_last_vgpr:
 ; GISEL10_W64:       ; %bb.0:
 ; GISEL10_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL10_W64-NEXT:    s_mov_b32 s32, 0
 ; GISEL10_W64-NEXT:    v_mov_b32_e32 v41, v8
 ; GISEL10_W64-NEXT:    v_mov_b32_e32 v42, v9
 ; GISEL10_W64-NEXT:    v_mov_b32_e32 v43, v10
@@ -821,7 +806,6 @@ define amdgpu_cs_chain void @set_inactive_chain_arg_last_vgpr(ptr addrspace(1) %
 ; DAGISEL10_W64-LABEL: set_inactive_chain_arg_last_vgpr:
 ; DAGISEL10_W64:       ; %bb.0:
 ; DAGISEL10_W64-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL10_W64-NEXT:    s_mov_b32 s32, 0
 ; DAGISEL10_W64-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; DAGISEL10_W64-NEXT:    v_mov_b32_e32 v40, v11
 ; DAGISEL10_W64-NEXT:    s_mov_b64 exec, s[0:1]

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.sponentry.ll b/llvm/test/CodeGen/AMDGPU/llvm.sponentry.ll
index 3ebfdcff4309f..caedcc5e44ba1 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.sponentry.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sponentry.ll
@@ -268,7 +268,7 @@ define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dvgpr_realign(i32 %val) #0 {
 ; FIXME: Optimize away the sponentry fixed object.
 ; CHECK: ScratchSize: 384
 
-define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_stack_args(<32 x i32> %fill.sgprs, i32 %val, ptr addrspace(5) %ptr) #0 {
+define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_stack_args(<32 x i32> %fill.vgprs, i32 %val, ptr addrspace(5) %ptr) #0 {
 ; DAGISEL-LABEL: sponentry_gfx_stack_args:
 ; DAGISEL:       ; %bb.0:
 ; DAGISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
@@ -393,6 +393,189 @@ define amdgpu_gfx ptr addrspace(5) @sponentry_gfx_dyn_alloc(i32 %val) #0 {
 
 ; CHECK: ScratchSize: 16
 
+define amdgpu_cs_chain void @sponentry_cs_chain(i32 %val, ptr addrspace(5) %ptr) #0 {
+; DAGISEL-LABEL: sponentry_cs_chain:
+; DAGISEL:       ; %bb.0: ; %entry
+; DAGISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; DAGISEL-NEXT:    s_wait_expcnt 0x0
+; DAGISEL-NEXT:    s_wait_samplecnt 0x0
+; DAGISEL-NEXT:    s_wait_bvhcnt 0x0
+; DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; DAGISEL-NEXT:    v_mov_b32_e32 v0, v9
+; DAGISEL-NEXT:    s_mov_b32 s0, exec_lo
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    scratch_store_b32 off, v8, s32 offset:4 scope:SCOPE_SYS
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    v_cmpx_gt_i32_e32 0x43, v8
+; DAGISEL-NEXT:  ; %bb.1: ; %if.then
+; DAGISEL-NEXT:    v_mov_b32_e32 v0, s32
+; DAGISEL-NEXT:  ; %bb.2: ; %if.end
+; DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; DAGISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; DAGISEL-NEXT:    scratch_store_b32 v9, v0, off scope:SCOPE_SYS
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    s_alloc_vgpr 0
+; DAGISEL-NEXT:    s_endpgm
+;
+; GISEL-LABEL: sponentry_cs_chain:
+; GISEL:       ; %bb.0: ; %entry
+; GISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT:    s_wait_expcnt 0x0
+; GISEL-NEXT:    s_wait_samplecnt 0x0
+; GISEL-NEXT:    s_wait_bvhcnt 0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_mov_b32_e32 v0, v9
+; GISEL-NEXT:    s_mov_b32 s0, exec_lo
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    scratch_store_b32 off, v8, s32 offset:4 scope:SCOPE_SYS
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    v_cmpx_gt_i32_e32 0x43, v8
+; GISEL-NEXT:  ; %bb.1: ; %if.then
+; GISEL-NEXT:    s_mov_b32 s1, s32
+; GISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GISEL-NEXT:    v_mov_b32_e32 v0, s1
+; GISEL-NEXT:  ; %bb.2: ; %if.end
+; GISEL-NEXT:    s_or_b32 exec_lo, exec_lo, s0
+; GISEL-NEXT:    scratch_store_b32 v9, v0, off scope:SCOPE_SYS
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    s_alloc_vgpr 0
+; GISEL-NEXT:    s_endpgm
+entry:
+  %local = alloca i32, addrspace(5)
+  store volatile i32 %val, ptr addrspace(5) %local
+  %which = icmp slt i32 %val, 67
+  br i1 %which, label %if.then, label %if.end
+
+if.then:
+  %stack.base = call ptr addrspace(5) @llvm.sponentry()
+  br label %if.end
+
+if.end:
+  %ret = phi ptr addrspace(5) [ %stack.base, %if.then ], [ %ptr, %entry ]
+  store volatile ptr addrspace(5) %ret, ptr addrspace(5) %ptr
+  ret void
+}
+
+; FIXME: Optimize away the 4 bytes for the sponentry frame index.
+; CHECK: ScratchSize: 12
+
+define amdgpu_cs_chain void @sponentry_cs_chain_dvgpr_realign(i32 %val) #0 {
+; CHECK-LABEL: sponentry_cs_chain_dvgpr_realign:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT:    s_wait_expcnt 0x0
+; CHECK-NEXT:    s_wait_samplecnt 0x0
+; CHECK-NEXT:    s_wait_bvhcnt 0x0
+; CHECK-NEXT:    s_wait_kmcnt 0x0
+; CHECK-NEXT:    s_mov_b32 s34, s32
+; CHECK-NEXT:    s_add_co_i32 s33, s32, 0x7f
+; CHECK-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, s34
+; CHECK-NEXT:    s_and_b32 s33, s33, 0xffffff80
+; CHECK-NEXT:    s_addk_co_i32 s32, 0x180
+; CHECK-NEXT:    s_wait_storecnt 0x0
+; CHECK-NEXT:    scratch_store_b32 off, v8, s33 offset:128 scope:SCOPE_SYS
+; CHECK-NEXT:    s_wait_storecnt 0x0
+; CHECK-NEXT:    scratch_store_b32 off, v0, s33 offset:128 scope:SCOPE_SYS
+; CHECK-NEXT:    s_wait_storecnt 0x0
+; CHECK-NEXT:    s_alloc_vgpr 0
+; CHECK-NEXT:    s_endpgm
+  %boop = alloca i32, addrspace(5)
+  %local = alloca i32, align 128, addrspace(5)
+  store volatile i32 %val, ptr addrspace(5) %local
+  %stack.base = call ptr addrspace(5) @llvm.sponentry()
+  store volatile ptr addrspace(5) %stack.base, ptr addrspace(5) %local
+  ret void
+}
+
+; FIXME: Optimize away the sponentry fixed object.
+; CHECK: ScratchSize: 384
+
+define amdgpu_cs_chain void @sponentry_cs_chain_dyn_alloc(i32 %val) #0 {
+; DAGISEL-LABEL: sponentry_cs_chain_dyn_alloc:
+; DAGISEL:       ; %bb.0:
+; DAGISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; DAGISEL-NEXT:    s_wait_expcnt 0x0
+; DAGISEL-NEXT:    s_wait_samplecnt 0x0
+; DAGISEL-NEXT:    s_wait_bvhcnt 0x0
+; DAGISEL-NEXT:    s_wait_kmcnt 0x0
+; DAGISEL-NEXT:    v_lshl_add_u32 v0, v8, 2, 15
+; DAGISEL-NEXT:    s_mov_b32 s1, exec_lo
+; DAGISEL-NEXT:    s_mov_b32 s0, 0
+; DAGISEL-NEXT:    s_mov_b32 s33, s32
+; DAGISEL-NEXT:    s_add_co_i32 s32, s32, 16
+; DAGISEL-NEXT:    v_and_b32_e32 v0, -16, v0
+; DAGISEL-NEXT:  .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; DAGISEL-NEXT:    s_ctz_i32_b32 s2, s1
+; DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; DAGISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; DAGISEL-NEXT:    v_readlane_b32 s3, v0, s2
+; DAGISEL-NEXT:    s_bitset0_b32 s1, s2
+; DAGISEL-NEXT:    s_max_u32 s0, s0, s3
+; DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; DAGISEL-NEXT:    s_cmp_lg_u32 s1, 0
+; DAGISEL-NEXT:    s_cbranch_scc1 .LBB12_1
+; DAGISEL-NEXT:  ; %bb.2:
+; DAGISEL-NEXT:    s_mov_b32 s1, s32
+; DAGISEL-NEXT:    v_mov_b32_e32 v1, s33
+; DAGISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; DAGISEL-NEXT:    v_lshl_add_u32 v0, s0, 5, s1
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    scratch_store_b32 off, v8, s1 scope:SCOPE_SYS
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    scratch_store_b32 off, v1, s1 scope:SCOPE_SYS
+; DAGISEL-NEXT:    s_wait_storecnt 0x0
+; DAGISEL-NEXT:    v_readfirstlane_b32 s32, v0
+; DAGISEL-NEXT:    s_alloc_vgpr 0
+; DAGISEL-NEXT:    s_endpgm
+;
+; GISEL-LABEL: sponentry_cs_chain_dyn_alloc:
+; GISEL:       ; %bb.0:
+; GISEL-NEXT:    s_wait_loadcnt_dscnt 0x0
+; GISEL-NEXT:    s_wait_expcnt 0x0
+; GISEL-NEXT:    s_wait_samplecnt 0x0
+; GISEL-NEXT:    s_wait_bvhcnt 0x0
+; GISEL-NEXT:    s_wait_kmcnt 0x0
+; GISEL-NEXT:    v_lshl_add_u32 v0, v8, 2, 15
+; GISEL-NEXT:    s_mov_b32 s1, exec_lo
+; GISEL-NEXT:    s_mov_b32 s0, 0
+; GISEL-NEXT:    s_mov_b32 s33, s32
+; GISEL-NEXT:    s_add_co_i32 s32, s32, 16
+; GISEL-NEXT:    v_and_b32_e32 v0, -16, v0
+; GISEL-NEXT:  .LBB12_1: ; =>This Inner Loop Header: Depth=1
+; GISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GISEL-NEXT:    s_ctz_i32_b32 s2, s1
+; GISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GISEL-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GISEL-NEXT:    v_readlane_b32 s3, v0, s2
+; GISEL-NEXT:    s_bitset0_b32 s1, s2
+; GISEL-NEXT:    s_max_u32 s0, s0, s3
+; GISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GISEL-NEXT:    s_cmp_lg_u32 s1, 0
+; GISEL-NEXT:    s_cbranch_scc1 .LBB12_1
+; GISEL-NEXT:  ; %bb.2:
+; GISEL-NEXT:    v_mov_b32_e32 v0, s33
+; GISEL-NEXT:    s_mov_b32 s1, s32
+; GISEL-NEXT:    s_lshl_b32 s0, s0, 5
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    scratch_store_b32 off, v8, s1 scope:SCOPE_SYS
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    scratch_store_b32 off, v0, s1 scope:SCOPE_SYS
+; GISEL-NEXT:    s_wait_storecnt 0x0
+; GISEL-NEXT:    s_wait_alu depctr_sa_sdst(0)
+; GISEL-NEXT:    s_add_co_u32 s32, s1, s0
+; GISEL-NEXT:    s_alloc_vgpr 0
+; GISEL-NEXT:    s_endpgm
+  %local = alloca i32, i32 %val, addrspace(5)
+  store volatile i32 %val, ptr addrspace(5) %local
+  %stack.base = call ptr addrspace(5) @llvm.sponentry()
+  store volatile ptr addrspace(5) %stack.base, ptr addrspace(5) %local
+  ret void
+}
+
+; CHECK: ScratchSize: 16
+
 attributes #0 = { nounwind "amdgpu-dynamic-vgpr-block-size"="16" }
 attributes #1 = { nounwind "amdgpu-dynamic-vgpr-block-size"="32" }
 attributes #2 = { nounwind "amdgpu-dynamic-vgpr-block-size"="0" }

diff  --git a/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain-preserve.mir b/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain-preserve.mir
index 6121b447228b8..056816517e407 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain-preserve.mir
@@ -36,12 +36,12 @@ body:             |
     ; GCN-LABEL: name: preserve_active_lanes_above_args
     ; GCN: liveins: $sgpr0, $vgpr8, $vgpr9, $vgpr10
     ; GCN-NEXT: {{  $}}
-    ; GCN-NEXT: SCRATCH_STORE_DWORD_ST killed $vgpr10, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr10, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
     ; GCN-NEXT: renamable $vgpr10 = V_MOV_B32_e32 10, implicit $exec
     ; GCN-NEXT: $vgpr8 = COPY killed renamable $vgpr10
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @callee + 4, target-flags(amdgpu-gotprel32-hi) @callee + 12, implicit-def dead $scc
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (p0) from got, addrspace 4)
-    ; GCN-NEXT: $vgpr10 = SCRATCH_LOAD_DWORD_ST 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
+    ; GCN-NEXT: $vgpr10 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
     ; GCN-NEXT: SI_CS_CHAIN_TC_W32 killed renamable $sgpr4_sgpr5, @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8, implicit $vgpr9
     renamable $vgpr10 = V_MOV_B32_e32 10, implicit $exec
     $vgpr8 = COPY renamable killed $vgpr10
@@ -70,7 +70,7 @@ body:             |
     ; GCN: liveins: $sgpr0, $sgpr35, $vgpr8, $vgpr9, $vgpr10
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: $sgpr1 = S_OR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: SCRATCH_STORE_DWORD_ST $vgpr10, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr10, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: S_NOP 0, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2, implicit-def $vgpr3, implicit-def $vgpr4, implicit-def $vgpr5, implicit-def $vgpr6, implicit-def $vgpr7
     ; GCN-NEXT: $vgpr10 = SI_SPILL_S32_TO_VGPR $sgpr35, 0, killed $vgpr10
@@ -81,7 +81,7 @@ body:             |
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @callee + 4, target-flags(amdgpu-gotprel32-hi) @callee + 12, implicit-def dead $scc
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (p0) from got, addrspace 4)
     ; GCN-NEXT: $sgpr1 = S_OR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: $vgpr10 = SCRATCH_LOAD_DWORD_ST 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
+    ; GCN-NEXT: $vgpr10 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: SI_CS_CHAIN_TC_W32 killed renamable $sgpr4_sgpr5, @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8, implicit $vgpr9
     S_NOP 0, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2, implicit-def $vgpr3, implicit-def $vgpr4, implicit-def $vgpr5, implicit-def $vgpr6, implicit-def $vgpr7
@@ -144,7 +144,7 @@ body:             |
     ; GCN: liveins: $sgpr0, $sgpr35, $vgpr8, $vgpr9, $vgpr10
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: $sgpr1 = S_OR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: SCRATCH_STORE_DWORD_ST $vgpr9, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr9, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: S_NOP 0, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2, implicit-def $vgpr3, implicit-def $vgpr4, implicit-def $vgpr5, implicit-def $vgpr6, implicit-def $vgpr7
     ; GCN-NEXT: $vgpr8 = SI_SPILL_S32_TO_VGPR $sgpr35, 0, killed $vgpr8
@@ -154,7 +154,7 @@ body:             |
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @callee + 4, target-flags(amdgpu-gotprel32-hi) @callee + 12, implicit-def dead $scc
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (p0) from got, addrspace 4)
     ; GCN-NEXT: $sgpr1 = S_OR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: $vgpr9 = SCRATCH_LOAD_DWORD_ST 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr9(tied-def 0) :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
+    ; GCN-NEXT: $vgpr9 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr9(tied-def 0) :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: SI_CS_CHAIN_TC_W32 killed renamable $sgpr4_sgpr5, @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8, implicit $vgpr9
     S_NOP 0, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2, implicit-def $vgpr3, implicit-def $vgpr4, implicit-def $vgpr5, implicit-def $vgpr6, implicit-def $vgpr7

diff  --git a/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain.mir b/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain.mir
index 69d857e11ac9b..9caf6d64e28a2 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-amdgpu-cs-chain.mir
@@ -38,15 +38,15 @@ body:             |
     ; GCN: liveins: $sgpr0, $sgpr35, $vgpr8, $vgpr9
     ; GCN-NEXT: {{  $}}
     ; GCN-NEXT: $sgpr1 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: SCRATCH_STORE_DWORD_ST $vgpr8, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
-    ; GCN-NEXT: SCRATCH_STORE_DWORD_ST $vgpr9, 4, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.1, addrspace 5)
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr8, $sgpr32, 0, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.0, addrspace 5)
+    ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr9, $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" store (s32) into %stack.1, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @callee + 4, target-flags(amdgpu-gotprel32-hi) @callee + 12, implicit-def dead $scc
     ; GCN-NEXT: renamable $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (p0) from got, addrspace 4)
     ; GCN-NEXT: S_NOP 0, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2, implicit-def $vgpr3, implicit-def $vgpr4, implicit-def $vgpr5, implicit-def $vgpr6, implicit-def $vgpr7
     ; GCN-NEXT: $sgpr1 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
-    ; GCN-NEXT: $vgpr8 = SCRATCH_LOAD_DWORD_ST 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr8(tied-def 0) :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
-    ; GCN-NEXT: $vgpr9 = SCRATCH_LOAD_DWORD_ST 4, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.1, addrspace 5)
+    ; GCN-NEXT: $vgpr8 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr8(tied-def 0) :: ("amdgpu-thread-private" load (s32) from %stack.0, addrspace 5)
+    ; GCN-NEXT: $vgpr9 = SCRATCH_LOAD_DWORD_SADDR $sgpr32, 4, 0, implicit $exec, implicit $flat_scr :: ("amdgpu-thread-private" load (s32) from %stack.1, addrspace 5)
     ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
     ; GCN-NEXT: SI_CS_CHAIN_TC_W32 killed renamable $sgpr4_sgpr5, @callee, 0, -1, amdgpu_allvgprs, implicit $sgpr0, implicit $vgpr8
     renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @callee + 4, target-flags(amdgpu-gotprel32-hi) @callee + 12, implicit-def dead $scc


        


More information about the llvm-commits mailing list