[llvm] 9a53f5f - AMDGPU: Handle llvm.stacksave and llvm.stackrestore

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Fri Aug 11 07:25:09 PDT 2023


Author: Matt Arsenault
Date: 2023-08-11T10:25:01-04:00
New Revision: 9a53f5f5c466b8d2a63827ec33dd97e4333db6ce

URL: https://github.com/llvm/llvm-project/commit/9a53f5f5c466b8d2a63827ec33dd97e4333db6ce
DIFF: https://github.com/llvm/llvm-project/commit/9a53f5f5c466b8d2a63827ec33dd97e4333db6ce.diff

LOG: AMDGPU: Handle llvm.stacksave and llvm.stackrestore

It's not clear whether the only valid use is for stackrestore to
directly consume stacksave outputs. These are handled exactly like a
regular stack pointer, so all the edge cases should theoretically work.
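
For reference, the IR pattern the new tests exercise (the pointers must
be in the alloca address space, addrspace(5)):

  %sp = call ptr addrspace(5) @llvm.stacksave.p5()
  ; ... code that grows the stack ...
  call void @llvm.stackrestore.p5(ptr addrspace(5) %sp)

The stack pointer SGPR holds the address scaled up by the wavefront
size, so stacksave selects to an s_lshr_b32 by log2(wavesize) and
stackrestore to the matching s_lshl_b32.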

https://reviews.llvm.org/D156669

Added: 
    llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.invalid.ll
    llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll

Modified: 
    llvm/docs/AMDGPUUsage.rst
    llvm/docs/ReleaseNotes.rst
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 0a7ae20e9b1c8c..79d93b42cd51e3 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -962,63 +962,66 @@ The AMDGPU backend implements the following LLVM IR intrinsics.
 .. table:: AMDGPU LLVM IR Intrinsics
   :name: amdgpu-llvm-ir-intrinsics-table
 
-  =========================================  ==========================================================
-  LLVM Intrinsic                             Description
-  =========================================  ==========================================================
-  llvm.amdgcn.sqrt                           Provides direct access to v_sqrt_f64, v_sqrt_f32 and v_sqrt_f16
-                                             (on targets with half support). Peforms sqrt function.
-
-  llvm.amdgcn.log                            Provides direct access to v_log_f32 and v_log_f16
-                                             (on targets with half support). Peforms log2 function.
-
-  llvm.amdgcn.exp2                           Provides direct access to v_exp_f32 and v_exp_f16
-                                             (on targets with half support). Performs exp2 function.
-
-  :ref:`llvm.frexp <int_frexp>`              Implemented for half, float and double.
-
-  :ref:`llvm.log2 <int_log2>`                Implemented for float and half (and vectors of float or
-                                             half). Not implemented for double. Hardware provides
-                                             1ULP accuracy for float, and 0.51ULP for half. Float
-                                             instruction does not natively support denormal
-                                             inputs. Backend will optimize out denormal scaling if
-                                             marked with the :ref:`afn <fastmath_afn>` flag.
-
-  :ref:`llvm.sqrt <int_sqrt>`                Implemented for double, float and half (and vectors).
-
-  :ref:`llvm.log <int_log>`                  Implemented for float and half (and vectors).
-
-  :ref:`llvm.exp <int_exp>`                  Implemented for float and half (and vectors).
-
-  :ref:`llvm.log10 <int_log10>`              Implemented for float and half (and vectors).
-
-  :ref:`llvm.exp2 <int_exp2>`                Implemented for float and half (and vectors of float or
-                                             half). Not implemented for double. Hardware provides
-                                             1ULP accuracy for float, and 0.51ULP for half. Float
-                                             instruction does not natively support denormal
-                                             inputs. Backend will optimize out denormal scaling if
-                                             marked with the :ref:`afn <fastmath_afn>` flag.
-
-  llvm.amdgcn.wave.reduce.umin               Performs an arithmetic unsigned min reduction on the unsigned values
-                                             provided by each lane in the wavefront.
-                                             Intrinsic takes a hint for reduction strategy using second operand
-                                             0: Target default preference,
-                                             1: `Iterative strategy`, and
-                                             2: `DPP`.
-                                             If target does not support the DPP operations (e.g. gfx6/7),
-                                             reduction will be performed using default iterative strategy.
-                                             Intrinsic is currently only implemented for i32.
-
-  llvm.amdgcn.wave.reduce.umax               Performs an arithmetic unsigned max reduction on the unsigned values
-                                             provided by each lane in the wavefront.
-                                             Intrinsic takes a hint for reduction strategy using second operand
-                                             0: Target default preference,
-                                             1: `Iterative strategy`, and
-                                             2: `DPP`.
-                                             If target does not support the DPP operations (e.g. gfx6/7),
-                                             reduction will be performed using default iterative strategy.
-                                             Intrinsic is currently only implemented for i32.
-
-  =========================================  ==========================================================
+  ==============================================   ==========================================================
+  LLVM Intrinsic                                   Description
+  ==============================================   ==========================================================
+  llvm.amdgcn.sqrt                                 Provides direct access to v_sqrt_f64, v_sqrt_f32 and v_sqrt_f16
+                                                   (on targets with half support). Performs the sqrt function.
+
+  llvm.amdgcn.log                                  Provides direct access to v_log_f32 and v_log_f16
+                                                   (on targets with half support). Performs the log2 function.
+
+  llvm.amdgcn.exp2                                 Provides direct access to v_exp_f32 and v_exp_f16
+                                                   (on targets with half support). Performs the exp2 function.
+
+  :ref:`llvm.frexp <int_frexp>`                    Implemented for half, float and double.
+
+  :ref:`llvm.log2 <int_log2>`                      Implemented for float and half (and vectors of float or
+                                                   half). Not implemented for double. Hardware provides
+                                                   1ULP accuracy for float, and 0.51ULP for half. Float
+                                                   instruction does not natively support denormal
+                                                   inputs. Backend will optimize out denormal scaling if
+                                                   marked with the :ref:`afn <fastmath_afn>` flag.
+
+  :ref:`llvm.sqrt <int_sqrt>`                      Implemented for double, float and half (and vectors).
+
+  :ref:`llvm.log <int_log>`                        Implemented for float and half (and vectors).
+
+  :ref:`llvm.exp <int_exp>`                        Implemented for float and half (and vectors).
+
+  :ref:`llvm.log10 <int_log10>`                    Implemented for float and half (and vectors).
+
+  :ref:`llvm.exp2 <int_exp2>`                      Implemented for float and half (and vectors of float or
+                                                   half). Not implemented for double. Hardware provides
+                                                   1ULP accuracy for float, and 0.51ULP for half. Float
+                                                   instruction does not natively support denormal
+                                                   inputs. Backend will optimize out denormal scaling if
+                                                   marked with the :ref:`afn <fastmath_afn>` flag.
+
+  :ref:`llvm.stacksave.p5 <int_stacksave>`         Implemented; must use the alloca address space.
+  :ref:`llvm.stackrestore.p5 <int_stackrestore>`   Implemented; must use the alloca address space.
+
+  llvm.amdgcn.wave.reduce.umin                     Performs an arithmetic unsigned min reduction on the unsigned values
+                                                   provided by each lane in the wavefront.
+                                                   The intrinsic takes a hint for the reduction strategy in the second operand:
+                                                   0: Target default preference,
+                                                   1: `Iterative strategy`, and
+                                                   2: `DPP`.
+                                                   If the target does not support the DPP operations (e.g. gfx6/7), the
+                                                   reduction will be performed using the default iterative strategy.
+                                                   The intrinsic is currently only implemented for i32.
+
+  llvm.amdgcn.wave.reduce.umax                     Performs an arithmetic unsigned max reduction on the unsigned values
+                                                   provided by each lane in the wavefront.
+                                                   The intrinsic takes a hint for the reduction strategy in the second operand:
+                                                   0: Target default preference,
+                                                   1: `Iterative strategy`, and
+                                                   2: `DPP`.
+                                                   If the target does not support the DPP operations (e.g. gfx6/7), the
+                                                   reduction will be performed using the default iterative strategy.
+                                                   The intrinsic is currently only implemented for i32.
+
+  ==============================================   ==========================================================
 
 .. TODO::
 

diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 70185b12ccf31a..3d50c3f6fd8f95 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -74,6 +74,8 @@ Changes to the AMDGPU Backend
 * `llvm.sqrt.f64` is now lowered correctly. Use `llvm.amdgcn.sqrt.f64`
   for raw instruction access.
 
+* Implemented `llvm.stacksave` and `llvm.stackrestore` intrinsics.
+
 Changes to the ARM Backend
 --------------------------
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 73820375a1e6e4..45f1e201623e88 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -692,6 +692,14 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
     SelectINTRINSIC_VOID(N);
     return;
   }
+  case AMDGPUISD::WAVE_ADDRESS: {
+    SelectWAVE_ADDRESS(N);
+    return;
+  }
+  case ISD::STACKRESTORE: {
+    SelectSTACKRESTORE(N);
+    return;
+  }
   }
 
   SelectCode(N);
@@ -2569,6 +2577,45 @@ void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
   SelectCode(N);
 }
 
+void AMDGPUDAGToDAGISel::SelectWAVE_ADDRESS(SDNode *N) {
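+  // The stack pointer SGPR holds the address scaled up by the wavefront
+  // size; shift right by log2(wavesize) to form the per-lane address.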
+  SDValue Log2WaveSize = CurDAG->getTargetConstant(
+      Subtarget->getWavefrontSizeLog2(), SDLoc(N), MVT::i32);
+  CurDAG->SelectNodeTo(N, AMDGPU::S_LSHR_B32, N->getVTList(),
+                       {N->getOperand(0), Log2WaveSize});
+}
+
+void AMDGPUDAGToDAGISel::SelectSTACKRESTORE(SDNode *N) {
+  SDValue SrcVal = N->getOperand(1);
+  if (SrcVal.getValueType() != MVT::i32) {
+    SelectCode(N); // Emit default error
+    return;
+  }
+
+  SDValue CopyVal;
+  Register SP = TLI->getStackPointerRegisterToSaveRestore();
+  SDLoc SL(N);
+
+  if (SrcVal.getOpcode() == AMDGPUISD::WAVE_ADDRESS) {
+    CopyVal = SrcVal.getOperand(0);
+  } else {
+    SDValue Log2WaveSize = CurDAG->getTargetConstant(
+        Subtarget->getWavefrontSizeLog2(), SL, MVT::i32);
+
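+    // The stack pointer is a wave-uniform SGPR, so a divergent restore
+    // value must first be read back to a scalar.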
+    if (N->isDivergent()) {
+      SrcVal = SDValue(CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL,
+                                              MVT::i32, SrcVal),
+                       0);
+    }
+
+    CopyVal = SDValue(CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
+                                             {SrcVal, Log2WaveSize}),
+                      0);
+  }
+
+  SDValue CopyToSP = CurDAG->getCopyToReg(N->getOperand(0), SL, SP, CopyVal);
+  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyToSP);
+}
+
 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
                                             unsigned &Mods,
                                             bool IsCanonicalizing,

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
index 0605baf3a0ccb2..06a03cfe02579d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.h
@@ -280,6 +280,8 @@ class AMDGPUDAGToDAGISel : public SelectionDAGISel {
   void SelectINTRINSIC_W_CHAIN(SDNode *N);
   void SelectINTRINSIC_WO_CHAIN(SDNode *N);
   void SelectINTRINSIC_VOID(SDNode *N);
+  void SelectWAVE_ADDRESS(SDNode *N);
+  void SelectSTACKRESTORE(SDNode *N);
 
 protected:
   // Include the pieces autogenerated from the target description.

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 6e7d3bd5c36d90..2ed66b1d57c4de 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5120,6 +5120,7 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(TC_RETURN_GFX)
   NODE_NAME_CASE(TRAP)
   NODE_NAME_CASE(RET_GLUE)
+  NODE_NAME_CASE(WAVE_ADDRESS)
   NODE_NAME_CASE(RETURN_TO_EPILOG)
   NODE_NAME_CASE(ENDPGM)
   NODE_NAME_CASE(ENDPGM_TRAP)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 26b91155ba85df..70e67cdd134519 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -242,9 +242,7 @@ class AMDGPUTargetLowering : public TargetLowering {
   SDValue LowerCall(CallLoweringInfo &CLI,
                     SmallVectorImpl<SDValue> &InVals) const override;
 
-  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
-                                  SelectionDAG &DAG) const;
-
+  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
   void ReplaceNodeResults(SDNode * N,
@@ -410,6 +408,10 @@ enum NodeType : unsigned {
   // Return with values from a non-entry function.
   RET_GLUE,
 
+  // Convert an unswizzled, wave-uniform stack address to an address
+  // compatible with a vector offset for use in stack access.
+  WAVE_ADDRESS,
+
   DWORDADDR,
   FRACT,
 

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 0857e841bf8292..f7d1f3d5f50e65 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -754,6 +754,8 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
                       MVT::i8, MVT::i128},
                      Custom);
 
+  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
+
   setTargetDAGCombine({ISD::ADD,
                        ISD::UADDO_CARRY,
                        ISD::SUB,
@@ -3521,6 +3523,23 @@ SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   return AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(Op, DAG);
 }
 
+SDValue SITargetLowering::LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const {
+  if (Op.getValueType() != MVT::i32)
+    return Op; // Defer to cannot select error.
+
+  Register SP = getStackPointerRegisterToSaveRestore();
+  SDLoc SL(Op);
+
+  SDValue CopyFromSP = DAG.getCopyFromReg(Op->getOperand(0), SL, SP, MVT::i32);
+
+  // Convert from wave uniform to swizzled vector address. This should protect
+  // from any edge cases where the stacksave result isn't directly used with
+  // stackrestore.
+  SDValue VectorAddress =
+      DAG.getNode(AMDGPUISD::WAVE_ADDRESS, SL, MVT::i32, CopyFromSP);
+  return DAG.getMergeValues({VectorAddress, CopyFromSP.getValue(1)}, SL);
+}
+
 Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
   Register Reg = StringSwitch<Register>(RegName)
@@ -5028,6 +5047,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return lowerXMUL_LOHI(Op, DAG);
   case ISD::DYNAMIC_STACKALLOC:
     return LowerDYNAMIC_STACKALLOC(Op, DAG);
+  case ISD::STACKSAVE:
+    return LowerSTACKSAVE(Op, DAG);
   }
   return SDValue();
 }

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 1745c0b9e88ea2..b9845593b1abd9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -409,6 +409,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
 
   SDValue lowerDYNAMIC_STACKALLOCImpl(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const;
 
   Register getRegisterByName(const char* RegName, LLT VT,
                              const MachineFunction &MF) const override;

diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.invalid.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.invalid.ll
new file mode 100644
index 00000000000000..fe577d4c70c18c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.invalid.ll
@@ -0,0 +1,27 @@
+; RUN: split-file %s %t
+; RUN: not --crash llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -filetype=null %t/stacksave-error.ll 2>&1 | FileCheck -check-prefix=ERR-SAVE %s
+; RUN: not --crash llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -filetype=null %t/stackrestore-error.ll 2>&1 | FileCheck -check-prefix=ERR-RESTORE %s
+
+; Test that an error is produced if stacksave/stackrestore are used
+; with the wrong (default) address space.
+
+;--- stacksave-error.ll
+
+declare ptr @llvm.stacksave.p0()
+
+; ERR-SAVE: LLVM ERROR: Cannot select: {{.+}}: i64,ch = stacksave
+define void @func_store_stacksave() {
+  %stacksave = call ptr @llvm.stacksave.p0()
+  call void asm sideeffect "; use $0", "s"(ptr %stacksave)
+  ret void
+}
+
+;--- stackrestore-error.ll
+
+declare void @llvm.stackrestore.p0(ptr)
+
+; ERR-RESTORE: LLVM ERROR: Cannot select: {{.+}}: ch = stackrestore {{.+}}, {{.+}}
+define amdgpu_gfx void @func_stacksave_sgpr(ptr inreg %stack) {
+  call void @llvm.stackrestore.p0(ptr %stack)
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
new file mode 100644
index 00000000000000..f3fb0425b57a5f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stacksave_stackrestore.ll
@@ -0,0 +1,1315 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GCN,WAVE32,WAVE32-OPT %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GCN,WAVE64,WAVE64-OPT %s
+
+; RUN: llc -O0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GCN,WAVE32,WAVE32-O0 %s
+; RUN: llc -O0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GCN,WAVE64,WAVE64-O0 %s
+
+declare ptr addrspace(5) @llvm.stacksave.p5()
+declare void @llvm.stackrestore.p5(ptr addrspace(5))
+
+define hidden void @stack_passed_argument([32 x i32], i32) {
+; GCN-LABEL: stack_passed_argument:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_setpc_b64 s[30:31]
+  ret void
+}
+
+define void @func_store_stacksave() {
+; WAVE32-OPT-LABEL: func_store_stacksave:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_lshr_b32 s4, s32, 5
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s4
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_store_stacksave:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_lshr_b32 s4, s32, 6
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s4
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_store_stacksave:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s4
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_store_stacksave:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s4
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define amdgpu_kernel void @kernel_store_stacksave() {
+; WAVE32-OPT-LABEL: kernel_store_stacksave:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_lshr_b32 s0, s32, 5
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s0
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_endpgm
+;
+; WAVE64-OPT-LABEL: kernel_store_stacksave:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_lshr_b32 s0, s32, 6
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s0
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_endpgm
+;
+; WAVE32-O0-LABEL: kernel_store_stacksave:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s0, s0, 5
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s0
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_endpgm
+;
+; WAVE64-O0-LABEL: kernel_store_stacksave:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s0, s0, 6
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s0
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_endpgm
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define amdgpu_kernel void @kernel_store_stacksave_nocall() {
+; WAVE32-OPT-LABEL: kernel_store_stacksave_nocall:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE32-OPT-NEXT:    s_mov_b32 s4, s0
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v0, 0
+; WAVE32-OPT-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x0
+; WAVE32-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_bitset0_b32 s7, 21
+; WAVE32-OPT-NEXT:    s_add_u32 s4, s4, s1
+; WAVE32-OPT-NEXT:    s_addc_u32 s5, s5, 0
+; WAVE32-OPT-NEXT:    s_lshr_b32 s0, s32, 5
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v1, s0
+; WAVE32-OPT-NEXT:    buffer_store_dword v0, v1, s[4:7], 0 offen
+; WAVE32-OPT-NEXT:    s_endpgm
+;
+; WAVE64-OPT-LABEL: kernel_store_stacksave_nocall:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE64-OPT-NEXT:    s_mov_b32 s4, s0
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v0, 0
+; WAVE64-OPT-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x0
+; WAVE64-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_add_u32 s4, s4, s1
+; WAVE64-OPT-NEXT:    s_addc_u32 s5, s5, 0
+; WAVE64-OPT-NEXT:    s_lshr_b32 s0, s32, 6
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v1, s0
+; WAVE64-OPT-NEXT:    buffer_store_dword v0, v1, s[4:7], 0 offen
+; WAVE64-OPT-NEXT:    s_endpgm
+;
+; WAVE32-O0-LABEL: kernel_store_stacksave_nocall:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_getpc_b64 s[12:13]
+; WAVE32-O0-NEXT:    s_mov_b32 s12, s0
+; WAVE32-O0-NEXT:    s_load_dwordx4 s[12:15], s[12:13], 0x0
+; WAVE32-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_bitset0_b32 s15, 21
+; WAVE32-O0-NEXT:    s_add_u32 s12, s12, s11
+; WAVE32-O0-NEXT:    s_addc_u32 s13, s13, 0
+; WAVE32-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s0, s0, 5
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, 0
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v1, s0
+; WAVE32-O0-NEXT:    buffer_store_dword v0, v1, s[12:15], 0 offen
+; WAVE32-O0-NEXT:    s_endpgm
+;
+; WAVE64-O0-LABEL: kernel_store_stacksave_nocall:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_getpc_b64 s[12:13]
+; WAVE64-O0-NEXT:    s_mov_b32 s12, s0
+; WAVE64-O0-NEXT:    s_load_dwordx4 s[12:15], s[12:13], 0x0
+; WAVE64-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_add_u32 s12, s12, s11
+; WAVE64-O0-NEXT:    s_addc_u32 s13, s13, 0
+; WAVE64-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s0, s0, 6
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v0, 0
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v1, s0
+; WAVE64-O0-NEXT:    buffer_store_dword v0, v1, s[12:15], 0 offen
+; WAVE64-O0-NEXT:    s_endpgm
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  store i32 0, ptr addrspace(5) %stacksave
+  ret void
+}
+
+define void @func_stacksave_nonentry_block(i1 %cond) {
+; WAVE32-OPT-LABEL: func_stacksave_nonentry_block:
+; WAVE32-OPT:       ; %bb.0: ; %bb0
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    v_and_b32_e32 v0, 1, v0
+; WAVE32-OPT-NEXT:    s_mov_b32 s4, exec_lo
+; WAVE32-OPT-NEXT:    v_cmpx_eq_u32_e32 1, v0
+; WAVE32-OPT-NEXT:    s_cbranch_execz .LBB4_2
+; WAVE32-OPT-NEXT:  ; %bb.1: ; %bb1
+; WAVE32-OPT-NEXT:    s_lshr_b32 s5, s32, 5
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s5
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:  .LBB4_2: ; %bb2
+; WAVE32-OPT-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_nonentry_block:
+; WAVE64-OPT:       ; %bb.0: ; %bb0
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    v_and_b32_e32 v0, 1, v0
+; WAVE64-OPT-NEXT:    s_mov_b64 s[4:5], exec
+; WAVE64-OPT-NEXT:    v_cmpx_eq_u32_e32 1, v0
+; WAVE64-OPT-NEXT:    s_cbranch_execz .LBB4_2
+; WAVE64-OPT-NEXT:  ; %bb.1: ; %bb1
+; WAVE64-OPT-NEXT:    s_lshr_b32 s6, s32, 6
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s6
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:  .LBB4_2: ; %bb2
+; WAVE64-OPT-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_nonentry_block:
+; WAVE32-O0:       ; %bb.0: ; %bb0
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_xor_saveexec_b32 s4, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-O0-NEXT:    ; implicit-def: $vgpr1
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v1, v0
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s7, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s7
+; WAVE32-O0-NEXT:    v_and_b32_e64 v1, 1, v1
+; WAVE32-O0-NEXT:    v_cmp_eq_u32_e64 s5, v1, 1
+; WAVE32-O0-NEXT:    s_mov_b32 s4, exec_lo
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    v_writelane_b32 v0, s4, 0
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s7, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s7
+; WAVE32-O0-NEXT:    s_and_b32 s4, s4, s5
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-O0-NEXT:    s_cbranch_execz .LBB4_2
+; WAVE32-O0-NEXT:  ; %bb.1: ; %bb1
+; WAVE32-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s4
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:  .LBB4_2: ; %bb2
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s7, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s7
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    v_readlane_b32 s4, v0, 0
+; WAVE32-O0-NEXT:    s_or_b32 exec_lo, exec_lo, s4
+; WAVE32-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE32-O0-NEXT:    s_xor_saveexec_b32 s4, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_nonentry_block:
+; WAVE64-O0:       ; %bb.0: ; %bb0
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-O0-NEXT:    ; implicit-def: $vgpr1
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v1, v0
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; WAVE64-O0-NEXT:    v_and_b32_e64 v1, 1, v1
+; WAVE64-O0-NEXT:    v_cmp_eq_u32_e64 s[6:7], v1, 1
+; WAVE64-O0-NEXT:    s_mov_b64 s[4:5], exec
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    v_writelane_b32 v0, s4, 0
+; WAVE64-O0-NEXT:    v_writelane_b32 v0, s5, 1
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; WAVE64-O0-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-O0-NEXT:    s_cbranch_execz .LBB4_2
+; WAVE64-O0-NEXT:  ; %bb.1: ; %bb1
+; WAVE64-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s4
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:  .LBB4_2: ; %bb2
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[10:11], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[10:11]
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    v_readlane_b32 s4, v0, 0
+; WAVE64-O0-NEXT:    v_readlane_b32 s5, v0, 1
+; WAVE64-O0-NEXT:    s_or_b64 exec, exec, s[4:5]
+; WAVE64-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE64-O0-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+bb0:
+  br i1 %cond, label %bb1, label %bb2
+
+bb1:
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  br label %bb2
+
+bb2:
+  ret void
+}
+
+define void @func_stackrestore_poison() {
+; WAVE32-OPT-LABEL: func_stackrestore_poison:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_lshl_b32 s32, s4, 5
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stackrestore_poison:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_lshl_b32 s32, s4, 6
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stackrestore_poison:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr4
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stackrestore_poison:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr4
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) poison)
+  ret void
+}
+
+define void @func_stackrestore_null() {
+; WAVE32-OPT-LABEL: func_stackrestore_null:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_mov_b32 s32, 0
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stackrestore_null:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_mov_b32 s32, 0
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stackrestore_null:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, 0
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stackrestore_null:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, 0
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) null)
+  ret void
+}
+
+define void @func_stackrestore_neg1() {
+; WAVE32-OPT-LABEL: func_stackrestore_neg1:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_movk_i32 s32, 0xffe0
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stackrestore_neg1:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_movk_i32 s32, 0xffc0
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stackrestore_neg1:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, -1
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stackrestore_neg1:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, -1
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) inttoptr (i32 -1 to ptr addrspace(5)))
+  ret void
+}
+
+define void @func_stackrestore_42() {
+; WAVE32-OPT-LABEL: func_stackrestore_42:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_movk_i32 s32, 0x540
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stackrestore_42:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_movk_i32 s32, 0xa80
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stackrestore_42:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, 42
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stackrestore_42:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, 42
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) inttoptr (i32 42 to ptr addrspace(5)))
+  ret void
+}
+
+define void @func_stacksave_stackrestore() {
+; WAVE32-OPT-LABEL: func_stacksave_stackrestore:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_stackrestore:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_stackrestore:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_stackrestore:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define void @func_stacksave_stackrestore_use() {
+; WAVE32-OPT-LABEL: func_stacksave_stackrestore_use:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_lshr_b32 s4, s32, 5
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s4
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_stackrestore_use:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_lshr_b32 s4, s32, 6
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s4
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_stackrestore_use:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s5, s4, 5
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s5
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_stackrestore_use:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s5, s4, 6
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s5
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define amdgpu_kernel void @kernel_stacksave_stackrestore_use() {
+; WAVE32-OPT-LABEL: kernel_stacksave_stackrestore_use:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_lshr_b32 s0, s32, 5
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s0
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_endpgm
+;
+; WAVE64-OPT-LABEL: kernel_stacksave_stackrestore_use:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_lshr_b32 s0, s32, 6
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s0
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_endpgm
+;
+; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_use:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s1, s0, 5
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s1
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE32-O0-NEXT:    s_endpgm
+;
+; WAVE64-O0-LABEL: kernel_stacksave_stackrestore_use:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s1, s0, 6
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s1
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE64-O0-NEXT:    s_endpgm
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define void @func_stacksave_stackrestore_voffset(i32 %offset) {
+; WAVE32-OPT-LABEL: func_stacksave_stackrestore_voffset:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_lshr_b32 s4, s32, 5
+; WAVE32-OPT-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; WAVE32-OPT-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE32-OPT-NEXT:    s_lshl_b32 s32, s4, 5
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_stackrestore_voffset:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_lshr_b32 s4, s32, 6
+; WAVE64-OPT-NEXT:    v_add_nc_u32_e32 v0, s4, v0
+; WAVE64-OPT-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE64-OPT-NEXT:    s_lshl_b32 s32, s4, 6
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_stackrestore_voffset:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE32-O0-NEXT:    s_lshr_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    v_add_nc_u32_e64 v0, s4, v0
+; WAVE32-O0-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_stackrestore_voffset:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s4, s32
+; WAVE64-O0-NEXT:    s_lshr_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    v_add_nc_u32_e64 v0, s4, v0
+; WAVE64-O0-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  %gep = getelementptr i8, ptr addrspace(5) %stacksave, i32 %offset
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %gep)
+  ret void
+}
+
+define void @func_stacksave_vgpr(ptr addrspace(5) %stack) {
+; WAVE32-OPT-LABEL: func_stacksave_vgpr:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE32-OPT-NEXT:    s_lshl_b32 s32, s4, 5
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_vgpr:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE64-OPT-NEXT:    s_lshl_b32 s32, s4, 6
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_vgpr:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE32-O0-NEXT:    s_lshl_b32 s4, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_vgpr:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    v_readfirstlane_b32 s4, v0
+; WAVE64-O0-NEXT:    s_lshl_b32 s4, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
+  ret void
+}
+
+define amdgpu_gfx void @func_stacksave_sgpr(ptr addrspace(5) inreg %stack) {
+; WAVE32-OPT-LABEL: func_stacksave_sgpr:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_lshl_b32 s32, s4, 5
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_sgpr:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_lshl_b32 s32, s4, 6
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_sgpr:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_lshl_b32 s34, s4, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s34
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_sgpr:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_lshl_b32 s34, s4, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s34
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
+  ret void
+}
+
+define amdgpu_kernel void @kernel_stacksave_sgpr(ptr addrspace(5) %stack) {
+; WAVE32-OPT-LABEL: kernel_stacksave_sgpr:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_load_dword s0, s[0:1], 0x0
+; WAVE32-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s0
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_endpgm
+;
+; WAVE64-OPT-LABEL: kernel_stacksave_sgpr:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_load_dword s0, s[0:1], 0x0
+; WAVE64-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s0
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_endpgm
+;
+; WAVE32-O0-LABEL: kernel_stacksave_sgpr:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_load_dword s0, s[4:5], 0x0
+; WAVE32-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s1, s0
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s1
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_lshl_b32 s0, s0, 5
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE32-O0-NEXT:    s_endpgm
+;
+; WAVE64-O0-LABEL: kernel_stacksave_sgpr:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_load_dword s0, s[4:5], 0x0
+; WAVE64-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s1, s0
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s1
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_lshl_b32 s0, s0, 6
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE64-O0-NEXT:    s_endpgm
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stack)
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
+  ret void
+}
+
+define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects() {
+; WAVE32-OPT-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_getpc_b64 s[8:9]
+; WAVE32-OPT-NEXT:    s_mov_b32 s8, s0
+; WAVE32-OPT-NEXT:    s_movk_i32 s32, 0x1200
+; WAVE32-OPT-NEXT:    s_load_dwordx4 s[8:11], s[8:9], 0x0
+; WAVE32-OPT-NEXT:    s_mov_b32 s0, s32
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v1, 17
+; WAVE32-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_bitset0_b32 s11, 21
+; WAVE32-OPT-NEXT:    s_add_u32 s8, s8, s1
+; WAVE32-OPT-NEXT:    s_addc_u32 s9, s9, 0
+; WAVE32-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE32-OPT-NEXT:    s_add_u32 s4, s4, stack_passed_argument@rel32@lo+4
+; WAVE32-OPT-NEXT:    s_addc_u32 s5, s5, stack_passed_argument@rel32@hi+12
+; WAVE32-OPT-NEXT:    s_lshr_b32 s6, s0, 5
+; WAVE32-OPT-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; WAVE32-OPT-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; WAVE32-OPT-NEXT:    buffer_store_dword v0, off, s[8:11], 0 offset:4
+; WAVE32-OPT-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE32-OPT-NEXT:    buffer_store_dword v1, off, s[8:11], s32 offset:4
+; WAVE32-OPT-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s6
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_endpgm
+;
+; WAVE64-OPT-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_getpc_b64 s[8:9]
+; WAVE64-OPT-NEXT:    s_mov_b32 s8, s0
+; WAVE64-OPT-NEXT:    s_movk_i32 s32, 0x2400
+; WAVE64-OPT-NEXT:    s_load_dwordx4 s[8:11], s[8:9], 0x0
+; WAVE64-OPT-NEXT:    s_mov_b32 s0, s32
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v1, 17
+; WAVE64-OPT-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_add_u32 s8, s8, s1
+; WAVE64-OPT-NEXT:    s_addc_u32 s9, s9, 0
+; WAVE64-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE64-OPT-NEXT:    s_add_u32 s4, s4, stack_passed_argument@rel32@lo+4
+; WAVE64-OPT-NEXT:    s_addc_u32 s5, s5, stack_passed_argument@rel32@hi+12
+; WAVE64-OPT-NEXT:    s_lshr_b32 s6, s0, 6
+; WAVE64-OPT-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; WAVE64-OPT-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; WAVE64-OPT-NEXT:    buffer_store_dword v0, off, s[8:11], 0 offset:4
+; WAVE64-OPT-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE64-OPT-NEXT:    buffer_store_dword v1, off, s[8:11], s32 offset:4
+; WAVE64-OPT-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s6
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_endpgm
+;
+; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_mov_b32 s32, 0x1200
+; WAVE32-O0-NEXT:    s_getpc_b64 s[20:21]
+; WAVE32-O0-NEXT:    s_mov_b32 s20, s0
+; WAVE32-O0-NEXT:    s_load_dwordx4 s[20:23], s[20:21], 0x0
+; WAVE32-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_bitset0_b32 s23, 21
+; WAVE32-O0-NEXT:    s_add_u32 s20, s20, s11
+; WAVE32-O0-NEXT:    s_addc_u32 s21, s21, 0
+; WAVE32-O0-NEXT:    ; implicit-def: $vgpr3
+; WAVE32-O0-NEXT:    s_mov_b32 s14, s10
+; WAVE32-O0-NEXT:    s_mov_b32 s13, s9
+; WAVE32-O0-NEXT:    s_mov_b32 s12, s8
+; WAVE32-O0-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; WAVE32-O0-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; WAVE32-O0-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; WAVE32-O0-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; WAVE32-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE32-O0-NEXT:    v_writelane_b32 v3, s0, 0
+; WAVE32-O0-NEXT:    s_lshr_b32 s0, s0, 5
+; WAVE32-O0-NEXT:    v_writelane_b32 v3, s0, 1
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], 0 offset:132 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s19
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, 42
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], 0 offset:4
+; WAVE32-O0-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE32-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; WAVE32-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; WAVE32-O0-NEXT:    s_mov_b32 s15, s32
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, 17
+; WAVE32-O0-NEXT:    buffer_store_dword v3, off, s[20:23], s15 offset:4
+; WAVE32-O0-NEXT:    s_getpc_b64 s[16:17]
+; WAVE32-O0-NEXT:    s_add_u32 s16, s16, stack_passed_argument@rel32@lo+4
+; WAVE32-O0-NEXT:    s_addc_u32 s17, s17, stack_passed_argument@rel32@hi+12
+; WAVE32-O0-NEXT:    s_mov_b32 s15, 20
+; WAVE32-O0-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
+; WAVE32-O0-NEXT:    s_mov_b32 s15, 10
+; WAVE32-O0-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
+; WAVE32-O0-NEXT:    v_or3_b32 v31, v0, v1, v2
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr15
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v1, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v2, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v4, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v5, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v6, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v7, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v8, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v9, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v10, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v11, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v12, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v13, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v14, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v15, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v16, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v17, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v18, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v19, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v20, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v21, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v22, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v23, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v24, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v25, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v26, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v27, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v28, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v29, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE32-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s19, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[20:23], 0 offset:132 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s19
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    v_readlane_b32 s1, v0, 1
+; WAVE32-O0-NEXT:    v_readlane_b32 s0, v0, 0
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s1
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE32-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE32-O0-NEXT:    s_endpgm
+;
+; WAVE64-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_mov_b32 s32, 0x2400
+; WAVE64-O0-NEXT:    s_getpc_b64 s[24:25]
+; WAVE64-O0-NEXT:    s_mov_b32 s24, s0
+; WAVE64-O0-NEXT:    s_load_dwordx4 s[24:27], s[24:25], 0x0
+; WAVE64-O0-NEXT:    s_waitcnt lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_add_u32 s24, s24, s11
+; WAVE64-O0-NEXT:    s_addc_u32 s25, s25, 0
+; WAVE64-O0-NEXT:    ; implicit-def: $vgpr3
+; WAVE64-O0-NEXT:    s_mov_b32 s14, s10
+; WAVE64-O0-NEXT:    s_mov_b32 s13, s9
+; WAVE64-O0-NEXT:    s_mov_b32 s12, s8
+; WAVE64-O0-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; WAVE64-O0-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; WAVE64-O0-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; WAVE64-O0-NEXT:    s_mov_b64 s[4:5], s[0:1]
+; WAVE64-O0-NEXT:    s_mov_b32 s0, s32
+; WAVE64-O0-NEXT:    v_writelane_b32 v3, s0, 0
+; WAVE64-O0-NEXT:    s_lshr_b32 s0, s0, 6
+; WAVE64-O0-NEXT:    v_writelane_b32 v3, s0, 1
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
+; WAVE64-O0-NEXT:    buffer_store_dword v3, off, s[24:27], 0 offset:132 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[20:21]
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v3, 42
+; WAVE64-O0-NEXT:    buffer_store_dword v3, off, s[24:27], 0 offset:4
+; WAVE64-O0-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE64-O0-NEXT:    s_mov_b64 s[0:1], s[24:25]
+; WAVE64-O0-NEXT:    s_mov_b64 s[2:3], s[26:27]
+; WAVE64-O0-NEXT:    s_mov_b32 s15, s32
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v3, 17
+; WAVE64-O0-NEXT:    buffer_store_dword v3, off, s[24:27], s15 offset:4
+; WAVE64-O0-NEXT:    s_getpc_b64 s[16:17]
+; WAVE64-O0-NEXT:    s_add_u32 s16, s16, stack_passed_argument@rel32@lo+4
+; WAVE64-O0-NEXT:    s_addc_u32 s17, s17, stack_passed_argument@rel32@hi+12
+; WAVE64-O0-NEXT:    s_mov_b32 s15, 20
+; WAVE64-O0-NEXT:    v_lshlrev_b32_e64 v2, s15, v2
+; WAVE64-O0-NEXT:    s_mov_b32 s15, 10
+; WAVE64-O0-NEXT:    v_lshlrev_b32_e64 v1, s15, v1
+; WAVE64-O0-NEXT:    v_or3_b32 v31, v0, v1, v2
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr15
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v0, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v1, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v2, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v3, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v4, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v5, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v6, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v7, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v8, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v9, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v10, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v11, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v12, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v13, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v14, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v15, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v16, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v17, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v18, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v19, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v20, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v21, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v22, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v23, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v24, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v25, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v26, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v27, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v28, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v29, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE64-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[20:21], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[24:27], 0 offset:132 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[20:21]
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    v_readlane_b32 s1, v0, 1
+; WAVE64-O0-NEXT:    v_readlane_b32 s0, v0, 0
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s1
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s0
+; WAVE64-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE64-O0-NEXT:    s_endpgm
+  %alloca = alloca [32 x i32], addrspace(5)
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  store volatile i32 42, ptr addrspace(5) %alloca
+  call void @stack_passed_argument([32 x i32] poison, i32 17)
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
+  ret void
+}
+
+define void @func_stacksave_stackrestore_call_with_stack_objects() {
+; WAVE32-OPT-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
+; WAVE32-OPT:       ; %bb.0:
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-OPT-NEXT:    s_mov_b32 s8, s33
+; WAVE32-OPT-NEXT:    s_mov_b32 s33, s32
+; WAVE32-OPT-NEXT:    s_xor_saveexec_b32 s4, -1
+; WAVE32-OPT-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
+; WAVE32-OPT-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-OPT-NEXT:    v_writelane_b32 v31, s30, 0
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE32-OPT-NEXT:    v_mov_b32_e32 v1, 17
+; WAVE32-OPT-NEXT:    s_addk_i32 s32, 0x1200
+; WAVE32-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE32-OPT-NEXT:    s_add_u32 s4, s4, stack_passed_argument@rel32@lo+4
+; WAVE32-OPT-NEXT:    s_addc_u32 s5, s5, stack_passed_argument@rel32@hi+12
+; WAVE32-OPT-NEXT:    s_mov_b32 s6, s32
+; WAVE32-OPT-NEXT:    v_writelane_b32 v31, s31, 1
+; WAVE32-OPT-NEXT:    s_lshr_b32 s7, s6, 5
+; WAVE32-OPT-NEXT:    buffer_store_dword v0, off, s[0:3], s33
+; WAVE32-OPT-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE32-OPT-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4
+; WAVE32-OPT-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; WAVE32-OPT-NEXT:    ;;#ASMSTART
+; WAVE32-OPT-NEXT:    ; use s7
+; WAVE32-OPT-NEXT:    ;;#ASMEND
+; WAVE32-OPT-NEXT:    s_mov_b32 s32, s6
+; WAVE32-OPT-NEXT:    v_readlane_b32 s31, v31, 1
+; WAVE32-OPT-NEXT:    v_readlane_b32 s30, v31, 0
+; WAVE32-OPT-NEXT:    s_xor_saveexec_b32 s4, -1
+; WAVE32-OPT-NEXT:    buffer_load_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
+; WAVE32-OPT-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-OPT-NEXT:    s_addk_i32 s32, 0xee00
+; WAVE32-OPT-NEXT:    s_mov_b32 s33, s8
+; WAVE32-OPT-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-OPT-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
+; WAVE64-OPT:       ; %bb.0:
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-OPT-NEXT:    s_mov_b32 s8, s33
+; WAVE64-OPT-NEXT:    s_mov_b32 s33, s32
+; WAVE64-OPT-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; WAVE64-OPT-NEXT:    buffer_store_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
+; WAVE64-OPT-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-OPT-NEXT:    v_writelane_b32 v31, s30, 0
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE64-OPT-NEXT:    v_mov_b32_e32 v1, 17
+; WAVE64-OPT-NEXT:    s_addk_i32 s32, 0x2400
+; WAVE64-OPT-NEXT:    s_getpc_b64 s[4:5]
+; WAVE64-OPT-NEXT:    s_add_u32 s4, s4, stack_passed_argument@rel32@lo+4
+; WAVE64-OPT-NEXT:    s_addc_u32 s5, s5, stack_passed_argument@rel32@hi+12
+; WAVE64-OPT-NEXT:    s_mov_b32 s6, s32
+; WAVE64-OPT-NEXT:    v_writelane_b32 v31, s31, 1
+; WAVE64-OPT-NEXT:    s_lshr_b32 s7, s6, 6
+; WAVE64-OPT-NEXT:    buffer_store_dword v0, off, s[0:3], s33
+; WAVE64-OPT-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE64-OPT-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4
+; WAVE64-OPT-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; WAVE64-OPT-NEXT:    ;;#ASMSTART
+; WAVE64-OPT-NEXT:    ; use s7
+; WAVE64-OPT-NEXT:    ;;#ASMEND
+; WAVE64-OPT-NEXT:    s_mov_b32 s32, s6
+; WAVE64-OPT-NEXT:    v_readlane_b32 s31, v31, 1
+; WAVE64-OPT-NEXT:    v_readlane_b32 s30, v31, 0
+; WAVE64-OPT-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; WAVE64-OPT-NEXT:    buffer_load_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
+; WAVE64-OPT-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-OPT-NEXT:    s_addk_i32 s32, 0xdc00
+; WAVE64-OPT-NEXT:    s_mov_b32 s33, s8
+; WAVE64-OPT-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-OPT-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
+; WAVE32-O0:       ; %bb.0:
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE32-O0-NEXT:    s_mov_b32 s25, s33
+; WAVE32-O0-NEXT:    s_mov_b32 s33, s32
+; WAVE32-O0-NEXT:    s_xor_saveexec_b32 s16, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s16
+; WAVE32-O0-NEXT:    s_add_i32 s32, s32, 0x1200
+; WAVE32-O0-NEXT:    ; implicit-def: $vgpr0
+; WAVE32-O0-NEXT:    v_writelane_b32 v32, s30, 0
+; WAVE32-O0-NEXT:    v_writelane_b32 v32, s31, 1
+; WAVE32-O0-NEXT:    s_mov_b32 s16, s32
+; WAVE32-O0-NEXT:    v_writelane_b32 v0, s16, 0
+; WAVE32-O0-NEXT:    s_lshr_b32 s16, s16, 5
+; WAVE32-O0-NEXT:    v_writelane_b32 v0, s16, 1
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s24, -1
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s24
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33
+; WAVE32-O0-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE32-O0-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; WAVE32-O0-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; WAVE32-O0-NEXT:    s_mov_b32 s16, s32
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, 17
+; WAVE32-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s16 offset:4
+; WAVE32-O0-NEXT:    s_getpc_b64 s[16:17]
+; WAVE32-O0-NEXT:    s_add_u32 s16, s16, stack_passed_argument@rel32@lo+4
+; WAVE32-O0-NEXT:    s_addc_u32 s17, s17, stack_passed_argument@rel32@hi+12
+; WAVE32-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; WAVE32-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v0, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v1, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v2, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v3, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v4, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v5, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v6, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v7, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v8, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v9, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v10, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v11, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v12, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v13, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v14, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v15, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v16, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v17, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v18, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v19, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v20, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v21, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v22, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v23, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v24, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v25, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v26, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v27, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v28, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v29, s18
+; WAVE32-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE32-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE32-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; WAVE32-O0-NEXT:    s_or_saveexec_b32 s24, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s24
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    v_readlane_b32 s5, v0, 1
+; WAVE32-O0-NEXT:    v_readlane_b32 s4, v0, 0
+; WAVE32-O0-NEXT:    ;;#ASMSTART
+; WAVE32-O0-NEXT:    ; use s5
+; WAVE32-O0-NEXT:    ;;#ASMEND
+; WAVE32-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE32-O0-NEXT:    v_readlane_b32 s31, v32, 1
+; WAVE32-O0-NEXT:    v_readlane_b32 s30, v32, 0
+; WAVE32-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE32-O0-NEXT:    s_xor_saveexec_b32 s4, -1
+; WAVE32-O0-NEXT:    buffer_load_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
+; WAVE32-O0-NEXT:    s_mov_b32 exec_lo, s4
+; WAVE32-O0-NEXT:    s_add_i32 s32, s32, 0xffffee00
+; WAVE32-O0-NEXT:    s_mov_b32 s33, s25
+; WAVE32-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE32-O0-NEXT:    s_setpc_b64 s[30:31]
+;
+; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
+; WAVE64-O0:       ; %bb.0:
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; WAVE64-O0-NEXT:    s_mov_b32 s19, s33
+; WAVE64-O0-NEXT:    s_mov_b32 s33, s32
+; WAVE64-O0-NEXT:    s_xor_saveexec_b64 s[16:17], -1
+; WAVE64-O0-NEXT:    buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[16:17]
+; WAVE64-O0-NEXT:    s_add_i32 s32, s32, 0x2400
+; WAVE64-O0-NEXT:    ; implicit-def: $vgpr0
+; WAVE64-O0-NEXT:    v_writelane_b32 v32, s30, 0
+; WAVE64-O0-NEXT:    v_writelane_b32 v32, s31, 1
+; WAVE64-O0-NEXT:    s_mov_b32 s16, s32
+; WAVE64-O0-NEXT:    v_writelane_b32 v0, s16, 0
+; WAVE64-O0-NEXT:    s_lshr_b32 s16, s16, 6
+; WAVE64-O0-NEXT:    v_writelane_b32 v0, s16, 1
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[26:27], -1
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[26:27]
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v0, 42
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s33
+; WAVE64-O0-NEXT:    s_waitcnt_vscnt null, 0x0
+; WAVE64-O0-NEXT:    s_mov_b64 s[22:23], s[2:3]
+; WAVE64-O0-NEXT:    s_mov_b64 s[20:21], s[0:1]
+; WAVE64-O0-NEXT:    s_mov_b32 s16, s32
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v0, 17
+; WAVE64-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s16 offset:4
+; WAVE64-O0-NEXT:    s_getpc_b64 s[16:17]
+; WAVE64-O0-NEXT:    s_add_u32 s16, s16, stack_passed_argument@rel32@lo+4
+; WAVE64-O0-NEXT:    s_addc_u32 s17, s17, stack_passed_argument@rel32@hi+12
+; WAVE64-O0-NEXT:    s_mov_b64 s[0:1], s[20:21]
+; WAVE64-O0-NEXT:    s_mov_b64 s[2:3], s[22:23]
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v0, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v1, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v2, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v3, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v4, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v5, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v6, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v7, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v8, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v9, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v10, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v11, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v12, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v13, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v14, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v15, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v16, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v17, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v18, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v19, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v20, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v21, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v22, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v23, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v24, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v25, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v26, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v27, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v28, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v29, s18
+; WAVE64-O0-NEXT:    ; implicit-def: $sgpr18
+; WAVE64-O0-NEXT:    v_mov_b32_e32 v30, s18
+; WAVE64-O0-NEXT:    s_swappc_b64 s[30:31], s[16:17]
+; WAVE64-O0-NEXT:    s_or_saveexec_b64 s[26:27], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[26:27]
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    v_readlane_b32 s5, v0, 1
+; WAVE64-O0-NEXT:    v_readlane_b32 s4, v0, 0
+; WAVE64-O0-NEXT:    ;;#ASMSTART
+; WAVE64-O0-NEXT:    ; use s5
+; WAVE64-O0-NEXT:    ;;#ASMEND
+; WAVE64-O0-NEXT:    s_mov_b32 s32, s4
+; WAVE64-O0-NEXT:    v_readlane_b32 s31, v32, 1
+; WAVE64-O0-NEXT:    v_readlane_b32 s30, v32, 0
+; WAVE64-O0-NEXT:    ; kill: killed $vgpr0
+; WAVE64-O0-NEXT:    s_xor_saveexec_b64 s[4:5], -1
+; WAVE64-O0-NEXT:    buffer_load_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
+; WAVE64-O0-NEXT:    s_mov_b64 exec, s[4:5]
+; WAVE64-O0-NEXT:    s_add_i32 s32, s32, 0xffffdc00
+; WAVE64-O0-NEXT:    s_mov_b32 s33, s19
+; WAVE64-O0-NEXT:    s_waitcnt vmcnt(0)
+; WAVE64-O0-NEXT:    s_setpc_b64 s[30:31]
+  %alloca = alloca [32 x i32], addrspace(5)
+  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
+  store volatile i32 42, ptr addrspace(5) %alloca
+  call void @stack_passed_argument([32 x i32] poison, i32 17)
+  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
+  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; WAVE32: {{.*}}
+; WAVE64: {{.*}}